DPDK patches and discussions
* [dpdk-dev] [PATCH] dmadev: introduce DMA device library
@ 2021-07-02 13:18 Chengwen Feng
  2021-07-02 13:59 ` Bruce Richardson
                   ` (29 more replies)
  0 siblings, 30 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-02 13:18 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

This patch introduces 'dmadevice', which is a generic type of DMA
device.

The APIs of the dmadev library expose generic operations which
enable configuration and I/O with DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                  |   4 +
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   6 +
 lib/dmadev/rte_dmadev.c      | 438 +++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 919 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  98 +++++
 lib/dmadev/rte_dmadev_pmd.h  | 210 ++++++++++
 lib/dmadev/version.map       |  32 ++
 lib/meson.build              |   1 +
 9 files changed, 1711 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 4347555..2019783 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..c918dae
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h', 'rte_dmadev_pmd.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..a94e839
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 HiSilicon Limited.
+ */
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].attached)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	uint16_t i;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++)
+		if ((strcmp(rte_dmadevices[i].name, name) == 0) &&
+		    (rte_dmadevices[i].attached == RTE_DMADEV_ATTACHED))
+			return i;
+
+	return -ENODEV;
+}
+
+int
+rte_dmadev_socket_id(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	return dev->socket_id;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	struct rte_dmadev *dev;
+	int diag;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	diag = (*dev->dev_ops->dev_info_get)(dev, dev_info);
+	if (diag != 0)
+		return diag;
+
+	dev_info->device = dev->device;
+	dev_info->driver_name = dev->driver_name;
+	dev_info->socket_id = dev->socket_id;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev;
+	int diag;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+	if (dev->started) {
+		RTE_DMADEV_LOG(ERR,
+		   "device %u must be stopped to allow configuration", dev_id);
+		return -EBUSY;
+	}
+
+	diag = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (diag != 0)
+		RTE_DMADEV_LOG(ERR, "device %u dev_configure failed, ret = %d",
+			       dev_id, diag);
+	else
+		dev->attached = 1;
+
+	return diag;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+	int diag;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+	if (dev->started != 0) {
+		RTE_DMADEV_LOG(ERR, "device %u already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	diag = (*dev->dev_ops->dev_start)(dev);
+	if (diag != 0)
+		return diag;
+
+mark_started:
+	dev->started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+	int diag;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	if (dev->started == 0) {
+		RTE_DMADEV_LOG(ERR, "device %u already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	diag = (*dev->dev_ops->dev_stop)(dev);
+	if (diag != 0)
+		return diag;
+
+mark_stopped:
+	dev->started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->started == 1) {
+		RTE_DMADEV_LOG(ERR, "device %u must be stopped before closing",
+			       dev_id);
+		return -EBUSY;
+	}
+
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_reset(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
+
+	/* Reset is not dependent on state of the device */
+	return (*dev->dev_ops->dev_reset)(dev);
+}
+
+int
+rte_dmadev_queue_setup(uint16_t dev_id,
+		       const struct rte_dmadev_queue_conf *conf)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(conf, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
+
+	return (*dev->dev_ops->queue_setup)(dev, conf);
+}
+
+int
+rte_dmadev_queue_release(uint16_t dev_id, uint16_t vq_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+	return (*dev->dev_ops->queue_release)(dev, vq_id);
+}
+
+int
+rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
+			  struct rte_dmadev_queue_info *info)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(info, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_info_get, -ENOTSUP);
+
+	memset(info, 0, sizeof(struct rte_dmadev_queue_info));
+	return (*dev->dev_ops->queue_info_get)(dev, vq_id, info);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, int vq_id,
+		     struct rte_dmadev_stats *stats)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(stats, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+
+	return (*dev->dev_ops->stats_get)(dev, vq_id, stats);
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, int vq_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+
+	return (*dev->dev_ops->stats_reset)(dev, vq_id);
+}
+
+static int
+xstats_get_count(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
+
+	return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
+}
+
+int
+rte_dmadev_xstats_names_get(uint16_t dev_id,
+			    struct rte_dmadev_xstats_name *xstats_names,
+			    uint32_t size)
+{
+	struct rte_dmadev *dev;
+	int cnt_expected_entries;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	cnt_expected_entries = xstats_get_count(dev_id);
+
+	if (xstats_names == NULL || cnt_expected_entries < 0 ||
+	    (int)size < cnt_expected_entries || size == 0)
+		return cnt_expected_entries;
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
+	return (*dev->dev_ops->xstats_get_names)(dev, xstats_names, size);
+}
+
+int
+rte_dmadev_xstats_get(uint16_t dev_id, const uint32_t ids[],
+		      uint64_t values[], uint32_t n)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(ids, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(values, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get, -ENOTSUP);
+
+	return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
+}
+
+int
+rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t nb_ids)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_reset, -ENOTSUP);
+
+	return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
+
+static inline uint16_t
+rte_dmadev_find_free_device_index(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].attached == RTE_DMADEV_DETACHED)
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	if (rte_dmadev_get_dev_id(name) >= 0) {
+		RTE_DMADEV_LOG(ERR,
+			"device with name %s already allocated!", name);
+		return NULL;
+	}
+
+	dev_id = rte_dmadev_find_free_device_index();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "reached maximum number of DMA devices");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+
+	if (dev_priv_size > 0) {
+		dev->dev_private = rte_zmalloc_socket("dmadev private",
+				     dev_priv_size,
+				     RTE_CACHE_LINE_SIZE,
+				     socket_id);
+		if (dev->dev_private == NULL) {
+			RTE_DMADEV_LOG(ERR,
+				"unable to allocate memory for dmadev");
+			return NULL;
+		}
+	}
+
+	dev->dev_id = dev_id;
+	dev->socket_id = socket_id;
+	dev->started = 0;
+	strlcpy(dev->name, name, RTE_DMADEV_NAME_MAX_LEN);
+
+	dev->attached = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	int ret;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_close(dev->dev_id);
+	if (ret != 0)
+		return ret;
+
+	if (dev->dev_private != NULL)
+		rte_free(dev->dev_private);
+
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	dev->attached = RTE_DMADEV_DETACHED;
+
+	return 0;
+}
+
+RTE_LOG_REGISTER(libdmadev_logtype, lib.dmadev, INFO);
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..f74fc6a
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,919 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The generic DMA device diagram:
+ *
+ *            ------------     ------------
+ *            | HW-queue |     | HW-queue |
+ *            ------------     ------------
+ *                   \            /
+ *                    \          /
+ *                     \        /
+ *                  ----------------
+ *                  |dma-controller|
+ *                  ----------------
+ *
+ *   The DMA could have multiple HW-queues, each HW-queue could have multiple
+ *   capabilities, e.g. whether to support fill operation, supported DMA
+ *   transfter direction and etc.
+ *
+ * The DMA framework is built on the following abstraction model:
+ *
+ *     ------------    ------------
+ *     |virt-queue|    |virt-queue|
+ *     ------------    ------------
+ *            \           /
+ *             \         /
+ *              \       /
+ *            ------------     ------------
+ *            | HW-queue |     | HW-queue |
+ *            ------------     ------------
+ *                   \            /
+ *                    \          /
+ *                     \        /
+ *                     ----------
+ *                     | dmadev |
+ *                     ----------
+ *
+ *   a) The DMA operation request must be submitted to the virt queue, virt
+ *      queues must be created based on HW queues, the DMA device could have
+ *      multiple HW queues.
+ *   b) The virt queues on the same HW-queue could represent different contexts,
+ *      e.g. user could create virt-queue-0 on HW-queue-0 for mem-to-mem
+ *      transfer scenario, and create virt-queue-1 on the same HW-queue for
+ *      mem-to-dev transfer scenario.
+ *   NOTE: user could also create multiple virt queues for mem-to-mem transfer
+ *         scenario as long as the corresponding driver supports it.
+ *
+ * The control plane APIs include configure/queue_setup/queue_release/start/
+ * stop/reset/close. In order to start device work, the call sequence must be
+ * as follows:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_queue_setup()
+ *     - rte_dmadev_start()
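+ *
+ *   For example, a minimal bring-up sketch (illustrative only; return-value
+ *   checks omitted, dev_id obtained from rte_dmadev_get_dev_id()):
+ *
+ *      struct rte_dmadev_conf conf = {
+ *              .addr_type = DMA_ADDRESS_TYPE_VA,
+ *              .nb_hw_queues = 1,
+ *              .max_vqs = 1,
+ *      };
+ *      struct rte_dmadev_queue_conf qconf = {
+ *              .direction = DMA_MEM_TO_MEM,
+ *              .hw_queue_id = 0,
+ *              .nb_desc = 128,
+ *      };
+ *      int vq_id;
+ *
+ *      rte_dmadev_configure(dev_id, &conf);
+ *      vq_id = rte_dmadev_queue_setup(dev_id, &qconf);
+ *      rte_dmadev_start(dev_id);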
+ *
+ * The dataplane APIs include two parts:
+ *   a) The first part is the submission of operation requests:
+ *        - rte_dmadev_copy()
+ *        - rte_dmadev_copy_sg() - scatter-gather form of copy
+ *        - rte_dmadev_fill()
+ *        - rte_dmadev_fill_sg() - scatter-gather form of fill
+ *        - rte_dmadev_fence()   - add a fence to force ordering between operations
+ *        - rte_dmadev_perform() - issue doorbell to hardware
+ *      These APIs could work with different virt queues which have different
+ *      contexts.
+ *      The first four APIs are used to submit an operation request to the virt
+ *      queue. If the submission is successful, a cookie (of type
+ *      'dma_cookie_t') is returned; otherwise a negative number is returned.
+ *   b) The second part is to obtain the result of requests:
+ *        - rte_dmadev_completed()
+ *            - return the number of operation requests completed successfully.
+ *        - rte_dmadev_completed_fails()
+ *            - return the number of operation requests that failed to complete.
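+ *
+ *   A typical dataplane flow, as an illustrative sketch (assumes a started
+ *   device with a mem-to-mem virt queue; real code must check every return
+ *   value):
+ *
+ *      dma_cookie_t cookie;
+ *      bool has_error;
+ *      uint16_t nb_done;
+ *
+ *      cookie = rte_dmadev_copy(dev_id, vq_id, src, dst, length, 0);
+ *      rte_dmadev_perform(dev_id, vq_id);
+ *      do
+ *              nb_done = rte_dmadev_completed(dev_id, vq_id, 1,
+ *                                             &cookie, &has_error);
+ *      while (nb_done == 0 && !has_error);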
+ *
+ * The misc APIs include info_get/queue_info_get/stats/xstats/selftest, which
+ * provide information query and self-test capabilities.
+ *
+ * Regarding the MT-safety of the dataplane APIs, there are two dimensions:
+ *   a) For one virt queue, the submit/completion APIs could be MT-safe,
+ *      e.g. one thread does the submit operation while another thread does
+ *      the completion operation.
+ *      If the driver supports it, it should declare RTE_DMA_DEV_CAPA_MT_VQ.
+ *      If the driver doesn't support it, it's up to the application to
+ *      guarantee MT-safety.
+ *   b) For multiple virt queues on the same HW queue, e.g. one thread operates
+ *      on virt-queue-0 while another thread operates on virt-queue-1.
+ *      If the driver supports it, it should declare RTE_DMA_DEV_CAPA_MT_MVQ.
+ *      If the driver doesn't support it, it's up to the application to
+ *      guarantee MT-safety.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_errno.h>
+#include <rte_compat.h>
+
+/**
+ * dma_cookie_t - an opaque DMA cookie
+ *
+ * If dma_cookie_t is >=0 it's a DMA operation request cookie; if <0 it's an
+ * error code.
+ * When using cookies, comply with the following rules:
+ * a) Cookies for each virtual queue are independent.
+ * b) For a virt queue, cookies are monotonically incremented; when a cookie
+ *    reaches INT_MAX, it wraps back to zero.
+ * c) The initial cookie of a virt queue is zero; after the device is stopped
+ *    or reset, the virt queue's cookie is reset to zero.
+ * Example:
+ *    step-1: start one dmadev
+ *    step-2: enqueue a copy operation, the cookie returned is 0
+ *    step-3: enqueue a copy operation again, the cookie returned is 1
+ *    ...
+ *    step-101: stop the dmadev
+ *    step-102: start the dmadev
+ *    step-103: enqueue a copy operation, the cookie returned is 0
+ *    ...
+ */
+typedef int32_t dma_cookie_t;
+
+/**
+ * dma_scatterlist - describes one element of a scatter-gather DMA operation
+ */
+struct dma_scatterlist {
+	void *src;
+	void *dst;
+	uint32_t length;
+};
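+
+/* For example (illustrative only; src0/dst0 and friends are placeholders), a
+ * two-element scatter list for rte_dmadev_copy_sg() could be built as:
+ *
+ *	struct dma_scatterlist sg[2] = {
+ *		{ .src = src0, .dst = dst0, .length = len0 },
+ *		{ .src = src1, .dst = dst1, .length = len1 },
+ *	};
+ */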
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name to select the DMA device identifier.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return the NUMA socket to which a device is connected.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   The NUMA socket id to which the device is connected or
+ *   a default of zero if the socket could not be determined.
+ *   - -EINVAL: dev_id value is out of range.
+ */
+__rte_experimental
+int
+rte_dmadev_socket_id(uint16_t dev_id);
+
+/**
+ * The capabilities of a DMA device
+ */
+#define RTE_DMA_DEV_CAPA_M2M	(1ull << 0) /**< Support mem-to-mem transfer */
+#define RTE_DMA_DEV_CAPA_M2D	(1ull << 1) /**< Support mem-to-dev transfer */
+#define RTE_DMA_DEV_CAPA_D2M	(1ull << 2) /**< Support dev-to-mem transfer */
+#define RTE_DMA_DEV_CAPA_D2D	(1ull << 3) /**< Support dev-to-dev transfer */
+#define RTE_DMA_DEV_CAPA_COPY	(1ull << 4) /**< Support copy ops */
+#define RTE_DMA_DEV_CAPA_FILL	(1ull << 5) /**< Support fill ops */
+#define RTE_DMA_DEV_CAPA_SG	(1ull << 6) /**< Support scatter-gather ops */
+#define RTE_DMA_DEV_CAPA_FENCE	(1ull << 7) /**< Support fence ops */
+#define RTE_DMA_DEV_CAPA_IOVA	(1ull << 8) /**< Support IOVA as DMA address */
+#define RTE_DMA_DEV_CAPA_VA	(1ull << 9) /**< Support VA as DMA address */
+#define RTE_DMA_DEV_CAPA_MT_VQ	(1ull << 10) /**< Support MT-safe of one virt queue */
+#define RTE_DMA_DEV_CAPA_MT_MVQ	(1ull << 11) /**< Support MT-safe of multiple virt queues */
+
+/**
+ * A structure used to retrieve the contextual information of
+ * a DMA device
+ */
+struct rte_dmadev_info {
+	/**
+	 * Fields filled by the framework
+	 */
+	struct rte_device *device; /**< Generic Device information */
+	const char *driver_name; /**< Device driver name */
+	int socket_id; /**< Socket ID where memory is allocated */
+
+	/**
+	 * Specification fields filled by driver
+	 */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
+	uint16_t max_hw_queues; /**< Maximum number of HW queues. */
+	uint16_t max_vqs_per_hw_queue;
+	/**< Maximum number of virt queues to allocate per HW queue */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virt queue descriptors */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virt queue descriptors */
+
+	/**
+	 * Status fields filled by driver
+	 */
+	uint16_t nb_hw_queues; /**< Number of HW queues configured */
+	uint16_t nb_vqs; /**< Number of virt queues configured */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve the contextual information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   contextual information of the device.
+ * @return
+ *   - =0: Success, driver updates the contextual information of the DMA device
+ *   - <0: Error code returned by the driver info get function.
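+ *
+ * For example (illustrative sketch), an application can verify copy support
+ * before using the device:
+ *
+ *	struct rte_dmadev_info info;
+ *
+ *	rte_dmadev_info_get(dev_id, &info);
+ *	if (!(info.dev_capa & RTE_DMA_DEV_CAPA_COPY))
+ *		return -ENOTSUP;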
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * dma_address_type
+ */
+enum dma_address_type {
+	DMA_ADDRESS_TYPE_IOVA, /**< Use IOVA as dma address */
+	DMA_ADDRESS_TYPE_VA, /**< Use VA as dma address */
+};
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	enum dma_address_type addr_type; /**< Address type to be used */
+	uint16_t nb_hw_queues; /**< Number of HW queues to enable for use */
+	uint16_t max_vqs; /**< Maximum number of virt queues to use */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * The caller may use rte_dmadev_info_get() to get the capabilities of the
+ * resources available for this DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * device to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start()
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully closing device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset a DMA device.
+ *
+ * This is different from the rte_dmadev_start->rte_dmadev_stop cycle; it is
+ * akin to a hard or soft reset.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Successful reset device.
+ *   - <0: Failure to reset device.
+ *   - (-ENOTSUP): If the device doesn't support this function.
+ */
+__rte_experimental
+int
+rte_dmadev_reset(uint16_t dev_id);
+
+/**
+ * dma_transfer_direction
+ */
+enum dma_transfer_direction {
+	DMA_MEM_TO_MEM,
+	DMA_MEM_TO_DEV,
+	DMA_DEV_TO_MEM,
+	DMA_DEV_TO_DEV,
+};
+
+/**
+ * A structure used to configure a DMA virt queue.
+ */
+struct rte_dmadev_queue_conf {
+	enum dma_transfer_direction direction;
+	/**< Associated transfer direction */
+	uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
+	uint16_t nb_desc; /**< Number of descriptors for this virt queue */
+	uint64_t dev_flags; /**< Device specific flags */
+	void *dev_ctx; /**< Device specific context */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virt queue.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The queue configuration structure encapsulated into rte_dmadev_queue_conf
+ *   object.
+ *
+ * @return
+ *   - >=0: Success; the returned value is the virt queue id.
+ *   - <0: Error code returned by the driver queue setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_queue_setup(uint16_t dev_id,
+		       const struct rte_dmadev_queue_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Release a virt queue.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue, as returned by queue setup.
+ *
+ * @return
+ *   - =0: Successfully released the virt queue.
+ *   - <0: Error code returned by the driver queue release function.
+ */
+__rte_experimental
+int
+rte_dmadev_queue_release(uint16_t dev_id, uint16_t vq_id);
+
+/**
+ * A structure used to retrieve information of a DMA virt queue.
+ */
+struct rte_dmadev_queue_info {
+	enum dma_transfer_direction direction;
+	/**< Associated transfer direction */
+	uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
+	uint16_t nb_desc; /**< Number of descriptors for this virt queue */
+	uint64_t dev_flags; /**< Device specific flags */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA virt queue.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue, as returned by queue setup.
+ * @param[out] info
+ *   The queue info structure encapsulated into rte_dmadev_queue_info object.
+ *
+ * @return
+ *   - =0: Successfully retrieved the information.
+ *   - <0: Error code returned by the driver queue info get function.
+ */
+__rte_experimental
+int
+rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
+			  struct rte_dmadev_queue_info *info);
+
+#include "rte_dmadev_core.h"
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the DMA virt queue.
+ *
+ * This queues up a copy operation to be performed by hardware, but does not
+ * trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Opaque flags for this operation.
+ *
+ * @return
+ *   dma_cookie_t: please refer to the corresponding definition.
+ *
+ * NOTE: The caller must ensure that the input parameter is valid and the
+ *       corresponding device supports the operation.
+ */
+__rte_experimental
+static inline dma_cookie_t
+rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, void *src, void *dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	return (*dev->copy)(dev, vq_id, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the DMA virt queue.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware,
+ * but does not trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue.
+ * @param sg
+ *   Pointer to the scatter list.
+ * @param sg_len
+ *   The number of scatterlist elements.
+ * @param flags
+ *   Opaque flags for this operation.
+ *
+ * @return
+ *   dma_cookie_t: please refer to the corresponding definition.
+ *
+ * NOTE: The caller must ensure that the input parameter is valid and the
+ *       corresponding device supports the operation.
+ */
+__rte_experimental
+static inline dma_cookie_t
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
+		   const struct dma_scatterlist *sg,
+		   uint32_t sg_len, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	return (*dev->copy_sg)(dev, vq_id, sg, sg_len, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the DMA virt queue
+ *
+ * This queues up a fill operation to be performed by hardware, but does not
+ * trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Opaque flags for this operation.
+ *
+ * @return
+ *   dma_cookie_t: please refer to the corresponding definition.
+ *
+ * NOTE: The caller must ensure that the input parameter is valid and the
+ *       corresponding device supports the operation.
+ */
+__rte_experimental
+static inline dma_cookie_t
+rte_dmadev_fill(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
+		void *dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	return (*dev->fill)(dev, vq_id, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list fill operation onto the DMA virt queue
+ *
+ * This queues up a scatter list fill operation to be performed by hardware,
+ * but does not trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param sg
+ *   Pointer to the scatter list.
+ * @param sg_len
+ *   The number of scatterlist elements.
+ * @param flags
+ *   Opaque flags for this operation.
+ *
+ * @return
+ *   dma_cookie_t: please refer to the corresponding definition.
+ *
+ * NOTE: The caller must ensure that the input parameter is valid and the
+ *       corresponding device supports the operation.
+ */
+__rte_experimental
+static inline dma_cookie_t
+rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
+		   const struct dma_scatterlist *sg, uint32_t sg_len,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	return (*dev->fill_sg)(dev, vq_id, pattern, sg, sg_len, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Add a fence to force ordering between operations
+ *
+ * This adds a fence to a sequence of operations to enforce ordering, such that
+ * all operations enqueued before the fence must be completed before operations
+ * after the fence.
+ * NOTE: Since this fence may be added as a flag to the last operation enqueued,
+ * this API may not function correctly when called immediately after an
+ * "rte_dmadev_perform" call i.e. before any new operations are enqueued.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue.
+ *
+ * @return
+ *   - =0: Successfully added the fence.
+ *   - <0: Failure to add fence.
+ *
+ * NOTE: The caller must ensure that the input parameter is valid and the
+ *       corresponding device supports the operation.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	return (*dev->fence)(dev, vq_id);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill()
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue.
+ *
+ * @return
+ *   - =0: Successfully triggered hardware.
+ *   - <0: Failure to trigger hardware.
+ *
+ * NOTE: The caller must ensure that the input parameter is valid and the
+ *       corresponding device supports the operation.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	return (*dev->perform)(dev, vq_id);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] cookie
+ *   The last completed operation's cookie.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *
+ * @return
+ *   The number of operations that successfully completed.
+ *
+ * NOTE: The caller must ensure that the input parameter is valid and the
+ *       corresponding device supports the operation.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
+		     dma_cookie_t *cookie, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	*has_error = false;
+	return (*dev->completed)(dev, vq_id, nb_cpls, cookie, has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that failed to complete.
+ * NOTE: This API should be used when rte_dmadev_completed() sets has_error.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue.
+ * @param nb_status
+ *   Indicates the size of the status array.
+ * @param[out] status
+ *   The error code of operations that failed to complete.
+ * @param[out] cookie
+ *   The cookie of the last operation that failed to complete.
+ *
+ * @return
+ *   The number of operations that failed to complete.
+ *
+ * NOTE: The caller must ensure that the input parameter is valid and the
+ *       corresponding device supports the operation.
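+ *
+ * Error-draining sketch (illustrative; the status array size of 8 is an
+ * arbitrary choice):
+ *
+ *	uint32_t status[8];
+ *	dma_cookie_t fail_cookie;
+ *	uint16_t nb_fail;
+ *
+ *	nb_fail = rte_dmadev_completed_fails(dev_id, vq_id, 8, status,
+ *					     &fail_cookie);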
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
+			   const uint16_t nb_status, uint32_t *status,
+			   dma_cookie_t *cookie)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
+}
+
+struct rte_dmadev_stats {
+	uint64_t enqueue_fail_count;
+	/**< Count of all operations which failed to enqueue */
+	uint64_t enqueued_count;
+	/**< Count of all operations which were successfully enqueued */
+	uint64_t completed_fail_count;
+	/**< Count of all operations which failed to complete */
+	uint64_t completed_count;
+	/**< Count of all operations which successfully completed */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all DMA virt queues.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue; -1 means all virt queues.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieved stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, int vq_id,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all DMA virt queues.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vq_id
+ *   The identifier of the virt queue; -1 means all virt queues.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, int vq_id);
+
+/** Maximum name length for extended statistics counters */
+#define RTE_DMA_DEV_XSTATS_NAME_SIZE 64
+
+/**
+ * A name-key lookup element for extended statistics.
+ *
+ * This structure is used to map between names and ID numbers
+ * for extended dmadev statistics.
+ */
+struct rte_dmadev_xstats_name {
+	char name[RTE_DMA_DEV_XSTATS_NAME_SIZE];
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve names of extended statistics of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] xstats_names
+ *   Block of memory to insert names into. Must be at least size in capacity.
+ *   If set to NULL, function returns required capacity.
+ * @param size
+ *   Capacity of xstats_names (number of names).
+ * @return
+ *   - positive value lower or equal to size: success. The return value
+ *     is the number of entries filled in the stats table.
+ *   - positive value higher than size: error, the given statistics table
+ *     is too small. The return value corresponds to the size that should
+ *     be given to succeed. The entries in the table are not valid and
+ *     shall not be used by the caller.
+ *   - negative value on error.
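+ *
+ * A typical two-call usage pattern (illustrative sketch; allocation and
+ * error checks omitted):
+ *
+ *	int nb = rte_dmadev_xstats_names_get(dev_id, NULL, 0);
+ *	struct rte_dmadev_xstats_name *names = malloc(sizeof(*names) * nb);
+ *
+ *	rte_dmadev_xstats_names_get(dev_id, names, nb);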
+ */
+__rte_experimental
+int
+rte_dmadev_xstats_names_get(uint16_t dev_id,
+			    struct rte_dmadev_xstats_name *xstats_names,
+			    uint32_t size);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve extended statistics of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param ids
+ *   The id numbers of the stats to get. The ids can be got from the stat
+ *   position in the stat list from rte_dmadev_get_xstats_names().
+ * @param[out] values
+ *   The values for each stats request by ID.
+ * @param n
+ *   The number of stats requested.
+ *
+ * @return
+ *   - positive value: number of stat entries filled into the values array.
+ *   - negative value on error.
+ */
+__rte_experimental
+int
+rte_dmadev_xstats_get(uint16_t dev_id, const uint32_t ids[],
+		      uint64_t values[], uint32_t n);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset the values of the xstats of the selected component in the device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param ids
+ *   Selects specific statistics to be reset. When NULL, all statistics
+ *   will be reset. If non-NULL, must point to array of at least
+ *   *nb_ids* size.
+ * @param nb_ids
+ *   The number of ids available from the *ids* array. Ignored when ids is NULL.
+ *
+ * @return
+ *   - zero: successfully reset the statistics to zero.
+ *   - negative value on error.
+ */
+__rte_experimental
+int
+rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t nb_ids);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..a3afea2
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types. But they are still part of the
+ * public API because they are used by inline public functions.
+ */
+
+struct rte_dmadev;
+
+typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
+				      void *src, void *dst,
+				      uint32_t length, uint64_t flags);
+/**< @internal Function used to enqueue a copy operation. */
+
+typedef dma_cookie_t (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
+					 const struct dma_scatterlist *sg,
+					 uint32_t sg_len, uint64_t flags);
+/**< @internal Function used to enqueue a scatter list copy operation. */
+
+typedef dma_cookie_t (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vq_id,
+				      uint64_t pattern, void *dst,
+				      uint32_t length, uint64_t flags);
+/**< @internal Function used to enqueue a fill operation. */
+
+typedef dma_cookie_t (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
+			uint64_t pattern, const struct dma_scatterlist *sg,
+			uint32_t sg_len, uint64_t flags);
+/**< @internal Function used to enqueue a scatter list fill operation. */
+
+typedef int (*dmadev_fence_t)(struct rte_dmadev *dev, uint16_t vq_id);
+/**< @internal Function used to add a fence ordering between operations. */
+
+typedef int (*dmadev_perform_t)(struct rte_dmadev *dev, uint16_t vq_id);
+/**< @internal Function used to trigger hardware to begin performing. */
+
+typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vq_id,
+				       const uint16_t nb_cpls,
+				       dma_cookie_t *cookie, bool *has_error);
+/**< @internal Function used to return number of successfully completed operations */
+
+typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
+			uint16_t vq_id, const uint16_t nb_status,
+			uint32_t *status, dma_cookie_t *cookie);
+/**< @internal Function used to return number of operations that failed to complete */
+
+#define RTE_DMADEV_NAME_MAX_LEN	64 /**< Max length of name of DMA PMD */
+
+struct rte_dmadev_ops;
+
+/**
+ * The data structure associated with each DMA device.
+ */
+struct rte_dmadev {
+	/**< Enqueue a copy operation onto the DMA device. */
+	dmadev_copy_t copy;
+	/**< Enqueue a scatter list copy operation onto the DMA device. */
+	dmadev_copy_sg_t copy_sg;
+	/**< Enqueue a fill operation onto the DMA device. */
+	dmadev_fill_t fill;
+	/**< Enqueue a scatter list fill operation onto the DMA device. */
+	dmadev_fill_sg_t fill_sg;
+	/**< Add a fence to force ordering between operations. */
+	dmadev_fence_t fence;
+	/**< Trigger hardware to begin performing enqueued operations. */
+	dmadev_perform_t perform;
+	/**< Returns the number of operations that successfully completed. */
+	dmadev_completed_t completed;
+	/**< Returns the number of operations that failed to complete. */
+	dmadev_completed_fails_t completed_fails;
+
+	void *dev_private; /**< PMD-specific private data */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD */
+
+	uint16_t dev_id; /**< Device ID for this instance */
+	int socket_id; /**< Socket ID where memory is allocated */
+	struct rte_device *device;
+	/**< Device info. supplied during device initialization */
+	const char *driver_name; /**< Driver info. supplied by probing */
+	char name[RTE_DMADEV_NAME_MAX_LEN]; /**< Device name */
+
+	RTE_STD_C11
+	uint8_t attached : 1; /**< Flag indicating the device is attached */
+	uint8_t started : 1; /**< Device state: STARTED(1)/STOPPED(0) */
+
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..ef03cf7
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/** @file
+ * RTE DMA PMD APIs
+ *
+ * @note
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_log.h>
+#include <rte_common.h>
+
+#include "rte_dmadev.h"
+
+extern int libdmadev_logtype;
+
+#define RTE_DMADEV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, libdmadev_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Macros to check for valid device */
+#define RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_pmd_is_valid_dev((dev_id))) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%d", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+#define RTE_DMADEV_VALID_DEVID_OR_RET(dev_id) do { \
+	if (!rte_dmadev_pmd_is_valid_dev((dev_id))) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%d", dev_id); \
+		return; \
+	} \
+} while (0)
+
+#define RTE_DMADEV_DETACHED  0
+#define RTE_DMADEV_ATTACHED  1
+
+/**
+ * Validate if the DMA device index is a valid attached DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - 1 if the device index is valid, 0 otherwise.
+ */
+static inline unsigned
+rte_dmadev_pmd_is_valid_dev(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	if (dev_id >= RTE_DMADEV_MAX_DEVS)
+		return 0;
+
+	dev = &rte_dmadevices[dev_id];
+	if (dev->attached != RTE_DMADEV_ATTACHED)
+		return 0;
+	else
+		return 1;
+}
+
+/**
+ * Definitions of control-plane functions exported by a driver through the
+ * generic structure of type *rte_dmadev_ops* supplied in the *rte_dmadev*
+ * structure associated with a device.
+ */
+
+typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
+				 struct rte_dmadev_info *dev_info);
+/**< @internal Function used to get device information of a device. */
+
+typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
+				  const struct rte_dmadev_conf *dev_conf);
+/**< @internal Function used to configure a device. */
+
+typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Function used to start a configured device. */
+
+typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Function used to stop a configured device. */
+
+typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Function used to close a configured device. */
+
+typedef int (*dmadev_reset_t)(struct rte_dmadev *dev);
+/**< @internal Function used to reset a configured device. */
+
+typedef int (*dmadev_queue_setup_t)(struct rte_dmadev *dev,
+				    const struct rte_dmadev_queue_conf *conf);
+/**< @internal Function used to allocate and set up a virt queue. */
+
+typedef int (*dmadev_queue_release_t)(struct rte_dmadev *dev, uint16_t vq_id);
+/**< @internal Function used to release a virt queue. */
+
+typedef int (*dmadev_queue_info_t)(struct rte_dmadev *dev, uint16_t vq_id,
+				   struct rte_dmadev_queue_info *info);
+/**< @internal Function used to retrieve information of a virt queue. */
+
+typedef int (*dmadev_stats_get_t)(struct rte_dmadev *dev, int vq_id,
+				  struct rte_dmadev_stats *stats);
+/**< @internal Function used to retrieve basic statistics. */
+
+typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, int vq_id);
+/**< @internal Function used to reset basic statistics. */
+
+typedef int (*dmadev_xstats_get_names_t)(const struct rte_dmadev *dev,
+		struct rte_dmadev_xstats_name *xstats_names,
+		uint32_t size);
+/**< @internal Function used to get names of extended stats. */
+
+typedef int (*dmadev_xstats_get_t)(const struct rte_dmadev *dev,
+		const uint32_t ids[], uint64_t values[], uint32_t n);
+/**< @internal Function used to retrieve extended stats. */
+
+typedef int (*dmadev_xstats_reset_t)(struct rte_dmadev *dev,
+				     const uint32_t ids[], uint32_t nb_ids);
+/**< @internal Function used to reset extended stats. */
+
+typedef int (*dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Function used to start dmadev selftest. */
+
+/** DMA device operations function pointer table */
+struct rte_dmadev_ops {
+	/**< Get device info. */
+	dmadev_info_get_t dev_info_get;
+	/**< Configure device. */
+	dmadev_configure_t dev_configure;
+	/**< Start device. */
+	dmadev_start_t dev_start;
+	/**< Stop device. */
+	dmadev_stop_t dev_stop;
+	/**< Close device. */
+	dmadev_close_t dev_close;
+	/**< Reset device. */
+	dmadev_reset_t dev_reset;
+
+	/**< Allocate and set up a virt queue. */
+	dmadev_queue_setup_t queue_setup;
+	/**< Release a virt queue. */
+	dmadev_queue_release_t queue_release;
+	/**< Retrieve information of a virt queue */
+	dmadev_queue_info_t queue_info_get;
+
+	/**< Get basic statistics. */
+	dmadev_stats_get_t stats_get;
+	/**< Reset basic statistics. */
+	dmadev_stats_reset_t stats_reset;
+	/**< Get names of extended stats. */
+	dmadev_xstats_get_names_t xstats_get_names;
+	/**< Get extended statistics. */
+	dmadev_xstats_get_t xstats_get;
+	/**< Reset extended statistics values. */
+	dmadev_xstats_reset_t xstats_reset;
+
+	/**< Device selftest function */
+	dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   Unique identifier name for each device
+ * @param dev_private_size
+ *   Size of private data memory allocated within rte_dmadev object.
+ *   Set to 0 to disable internal memory allocation and allow for
+ *   self-allocation.
+ * @param socket_id
+ *   Socket to allocate resources on.
+ *
+ * @return
+ *   - NULL: Failure to allocate
+ *   - Other: The rte_dmadev structure pointer for the new device
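+ *
+ * A driver probe path might use it as follows (illustrative sketch;
+ * "my_priv" and "my_dmadev_ops" are hypothetical driver-side names):
+ *
+ *	struct rte_dmadev *dev;
+ *
+ *	dev = rte_dmadev_pmd_allocate(name, sizeof(struct my_priv),
+ *				      rte_socket_id());
+ *	if (dev == NULL)
+ *		return -ENOMEM;
+ *	dev->dev_ops = &my_dmadev_ops;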
+ */
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name, size_t dev_private_size,
+			int socket_id);
+
+/**
+ * Release the specified dmadev device.
+ *
+ * @param dev
+ *   The *dmadev* pointer is the address of the *rte_dmadev* structure.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..383b3ca
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,32 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_count;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_socket_id;
+	rte_dmadev_info_get;
+	rte_dmadev_configure;
+	rte_dmadev_start;
+	rte_dmadev_stop;
+	rte_dmadev_close;
+	rte_dmadev_reset;
+	rte_dmadev_queue_setup;
+	rte_dmadev_queue_release;
+	rte_dmadev_queue_info_get;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_fill;
+	rte_dmadev_fill_sg;
+	rte_dmadev_fence;
+	rte_dmadev_perform;
+	rte_dmadev_completed;
+	rte_dmadev_completed_fails;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_xstats_names_get;
+	rte_dmadev_xstats_get;
+	rte_dmadev_xstats_reset;
+	rte_dmadev_selftest;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
@ 2021-07-02 13:59 ` Bruce Richardson
  2021-07-04  9:30 ` Jerin Jacob
                   ` (28 subsequent siblings)
  29 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-02 13:59 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, dev, mb, nipun.gupta,
	hemant.agrawal, maxime.coquelin, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev, liangma

On Fri, Jul 02, 2021 at 09:18:11PM +0800, Chengwen Feng wrote:
> This patch introduces 'dmadevice', which is a generic type of DMA
> device.
> 
> The APIs of the dmadev library expose generic operations which
> enable configuration and I/O with DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
Thanks for this new revision. We will try porting our driver
implementations under this API and see how it performs. We'll send on
feedback later based on that and based on code review.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
  2021-07-02 13:59 ` Bruce Richardson
@ 2021-07-04  9:30 ` Jerin Jacob
  2021-07-05 10:52   ` Bruce Richardson
  2021-07-06  3:01   ` fengchengwen
  2021-07-04 14:57 ` Andrew Rybchenko
                   ` (27 subsequent siblings)
  29 siblings, 2 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-04  9:30 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch introduces 'dmadevice', which is a generic type of DMA
> device.
>
> The APIs of the dmadev library expose generic operations which
> enable configuration and I/O with DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

Thanks for v1.

I would suggest finalizing lib/dmadev/rte_dmadev.h before doing the
implementation so that you don't need
to waste time on reworking the implementation.

Comments inline.

> ---
>  MAINTAINERS                  |   4 +
>  config/rte_config.h          |   3 +
>  lib/dmadev/meson.build       |   6 +
>  lib/dmadev/rte_dmadev.c      | 438 +++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h      | 919 +++++++++++++++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h |  98 +++++
>  lib/dmadev/rte_dmadev_pmd.h  | 210 ++++++++++
>  lib/dmadev/version.map       |  32 ++

Missed updating doxygen. See doc/api/doxy-api.conf.in
Use meson -Denable_docs=true to verify the generated doxygen doc.

>  lib/meson.build              |   1 +
>  9 files changed, 1711 insertions(+)
>  create mode 100644 lib/dmadev/meson.build
>  create mode 100644 lib/dmadev/rte_dmadev.c
>  create mode 100644 lib/dmadev/rte_dmadev.h
>  create mode 100644 lib/dmadev/rte_dmadev_core.h
>  create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>  create mode 100644 lib/dmadev/version.map
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 4347555..2019783 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
>  F: app/test/test_rawdev.c
>  F: doc/guides/prog_guide/rawdev.rst
>

Add EXPERIMENTAL

> +DMA device API
> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/
> +
>

> new file mode 100644
> index 0000000..a94e839
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -0,0 +1,438 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#include <ctype.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <stdint.h>
> +
> +#include <rte_log.h>
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_memory.h>
> +#include <rte_memzone.h>
> +#include <rte_malloc.h>
> +#include <rte_errno.h>
> +#include <rte_string_fns.h>

Sort in alphabetical order.

> +
> +#include "rte_dmadev.h"
> +#include "rte_dmadev_pmd.h"
> +
> +struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];

# Please check whether you have missed any multiprocess angle.
lib/regexdev/rte_regexdev.c is the latest device class implemented in DPDK;
please check the *rte_regexdev_shared_data scheme.


# Missing dynamic log for this library.


> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..f74fc6a
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,919 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.

It would be nice to add other companies' names who have contributed to
the specification.

> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The generic DMA device diagram:
> + *
> + *            ------------     ------------
> + *            | HW-queue |     | HW-queue |
> + *            ------------     ------------
> + *                   \            /
> + *                    \          /
> + *                     \        /
> + *                  ----------------
> + *                  |dma-controller|
> + *                  ----------------
> + *
> + *   The DMA could have multiple HW-queues, each HW-queue could have multiple
> + *   capabilities, e.g. whether to support fill operation, supported DMA
> + *   transfter direction and etc.

Typo: "transfter" -> "transfer".

> + *
> + * The DMA framework is built on the following abstraction model:
> + *
> + *     ------------    ------------
> + *     |virt-queue|    |virt-queue|
> + *     ------------    ------------
> + *            \           /
> + *             \         /
> + *              \       /
> + *            ------------     ------------
> + *            | HW-queue |     | HW-queue |
> + *            ------------     ------------
> + *                   \            /
> + *                    \          /
> + *                     \        /
> + *                     ----------
> + *                     | dmadev |
> + *                     ----------

Continuing the discussion with @Morten Brørup, I think we need to
finalize the model.

> + *   a) The DMA operation request must be submitted to the virt queue, virt
> + *      queues must be created based on HW queues, the DMA device could have
> + *      multiple HW queues.
> + *   b) The virt queues on the same HW-queue could represent different contexts,
> + *      e.g. user could create virt-queue-0 on HW-queue-0 for mem-to-mem
> + *      transfer scenario, and create virt-queue-1 on the same HW-queue for
> + *      mem-to-dev transfer scenario.
> + *   NOTE: user could also create multiple virt queues for mem-to-mem transfer
> + *         scenario as long as the corresponding driver supports.
> + *
> + * The control plane APIs include configure/queue_setup/queue_release/start/
> + * stop/reset/close, in order to start device work, the call sequence must be
> + * as follows:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_queue_setup()
> + *     - rte_dmadev_start()

Please document the reconfigure behaviour etc. Please check the
lib/regexdev/rte_regexdev.h introduction; I have added similar text
there, so you can reuse as much as possible.


> + * The dataplane APIs include two parts:
> + *   a) The first part is the submission of operation requests:
> + *        - rte_dmadev_copy()
> + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> + *        - rte_dmadev_fill()
> + *        - rte_dmadev_fill_sg() - scatter-gather form of fill
> + *        - rte_dmadev_fence()   - add a fence force ordering between operations
> + *        - rte_dmadev_perform() - issue doorbell to hardware
> + *      These APIs could work with different virt queues which have different
> + *      contexts.
> + *      The first four APIs are used to submit the operation request to the virt
> + *      queue, if the submission is successful, a cookie (as type
> + *      'dma_cookie_t') is returned, otherwise a negative number is returned.
> + *   b) The second part is to obtain the result of requests:
> + *        - rte_dmadev_completed()
> + *            - return the number of operation requests completed successfully.
> + *        - rte_dmadev_completed_fails()
> + *            - return the number of operation requests failed to complete.
> + *
> + * The misc APIs include info_get/queue_info_get/stats/xstats/selftest, provide
> + * information query and self-test capabilities.
> + *
> + * About the dataplane APIs MT-safe, there are two dimensions:
> + *   a) For one virt queue, the submit/completion API could be MT-safe,
> + *      e.g. one thread do submit operation, another thread do completion
> + *      operation.
> + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_VQ.
> + *      If driver don't support it, it's up to the application to guarantee
> + *      MT-safe.
> + *   b) For multiple virt queues on the same HW queue, e.g. one thread do
> + *      operation on virt-queue-0, another thread do operation on virt-queue-1.
> + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_MVQ.
> + *      If driver don't support it, it's up to the application to guarantee
> + *      MT-safe.

From an application PoV this may make it hard to write portable
applications. Please check the latest thread with @Morten Brørup.

> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <rte_common.h>
> +#include <rte_memory.h>
> +#include <rte_errno.h>
> +#include <rte_compat.h>

Sort in alphabetical order.

> +
> +/**
> + * dma_cookie_t - an opaque DMA cookie

Since we are defining the behaviour, it is not opaque any more.
I think it is better to call it ring_idx or so.


> +#define RTE_DMA_DEV_CAPA_MT_MVQ (1ull << 11) /**< Support MT-safe of multiple virt queues */

Please add plenty of @see references for all symbols, pointing to
where each is used, so that one can understand the full scope of a
symbol. See the example below.

#define RTE_REGEXDEV_CAPA_RUNTIME_COMPILATION_F (1ULL << 0)
/**< RegEx device does support compiling the rules at runtime unlike
 * loading only the pre-built rule database using
 * struct rte_regexdev_config::rule_db in rte_regexdev_configure()
 *
 * @see struct rte_regexdev_config::rule_db, rte_regexdev_configure()
 * @see struct rte_regexdev_info::regexdev_capa
 */

> + *
> + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's a error
> + * code.
> + * When using cookies, comply with the following rules:
> + * a) Cookies for each virtual queue are independent.
> + * b) For a virt queue, the cookie are monotonically incremented, when it reach
> + *    the INT_MAX, it wraps back to zero.
> + * c) The initial cookie of a virt queue is zero, after the device is stopped or
> + *    reset, the virt queue's cookie needs to be reset to zero.
> + * Example:
> + *    step-1: start one dmadev
> + *    step-2: enqueue a copy operation, the cookie return is 0
> + *    step-3: enqueue a copy operation again, the cookie return is 1
> + *    ...
> + *    step-101: stop the dmadev
> + *    step-102: start the dmadev
> + *    step-103: enqueue a copy operation, the cookie return is 0
> + *    ...
> + */

Good explanation.

> +typedef int32_t dma_cookie_t;


> +
> +/**
> + * dma_scatterlist - can hold scatter DMA operation request
> + */
> +struct dma_scatterlist {

I prefer to change scatterlist -> sg, i.e. rte_dma_sg.

> +       void *src;
> +       void *dst;
> +       uint32_t length;
> +};
> +

> +
> +/**
> + * A structure used to retrieve the contextual information of
> + * an DMA device
> + */
> +struct rte_dmadev_info {
> +       /**
> +        * Fields filled by framewok

Typo: "framewok" -> "framework".

> +        */
> +       struct rte_device *device; /**< Generic Device information */
> +       const char *driver_name; /**< Device driver name */
> +       int socket_id; /**< Socket ID where memory is allocated */
> +
> +       /**
> +        * Specification fields filled by driver
> +        */
> +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> +       uint16_t max_hw_queues; /**< Maximum number of HW queues. */
> +       uint16_t max_vqs_per_hw_queue;
> +       /**< Maximum number of virt queues to allocate per HW queue */
> +       uint16_t max_desc;
> +       /**< Maximum allowed number of virt queue descriptors */
> +       uint16_t min_desc;
> +       /**< Minimum allowed number of virt queue descriptors */

Please add max_nb_segs, i.e. the maximum number of segments supported.

> +
> +       /**
> +        * Status fields filled by driver
> +        */
> +       uint16_t nb_hw_queues; /**< Number of HW queues configured */
> +       uint16_t nb_vqs; /**< Number of virt queues configured */
> +};
> +
> +/**
> + * dma_address_type
> + */
> +enum dma_address_type {
> +       DMA_ADDRESS_TYPE_IOVA, /**< Use IOVA as dma address */
> +       DMA_ADDRESS_TYPE_VA, /**< Use VA as dma address */
> +};
> +
> +/**
> + * A structure used to configure a DMA device.
> + */
> +struct rte_dmadev_conf {
> +       enum dma_address_type addr_type; /**< Address type to used */

I think there are 3 kinds of limitations/capabilities.

When the system is configured as IOVA as VA:
1) Device supports any VA address, e.g. memory from rte_malloc(),
rte_memzone(), malloc, stack memory.
2) Device supports only VA addresses from rte_malloc()/rte_memzone(),
i.e. memory backed by hugepages and added to the DMA map.

When the system is configured as IOVA as PA:
1) Device supports only PA addresses.

IMO, the above needs to be advertised as capabilities, and the
application needs to align with that; I don't think the application
should request the driver to work in any of these modes.
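Roughly, as capability flags (a sketch; names and bit positions are
only illustrative):

#define RTE_DMA_DEV_CAPA_ADDR_ANY_VA  (1ull << 8)
/**< Device can access any process VA (e.g. via IOMMU/SVA) */
#define RTE_DMA_DEV_CAPA_ADDR_DMA_MAP (1ull << 9)
/**< Device can access only VA ranges registered for DMA,
 * e.g. via rte_dev_dma_map()
 */
#define RTE_DMA_DEV_CAPA_ADDR_PA      (1ull << 10)
/**< Device works with physical addresses (IOVA as PA) */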



> +       uint16_t nb_hw_queues; /**< Number of HW-queues enable to use */
> +       uint16_t max_vqs; /**< Maximum number of virt queues to use */

You need to document what the maximum allowed value is, i.e. that it
is based on info_get(), and mention the corresponding field in the
info structure.


> +
> +/**
> + * dma_transfer_direction
> + */
> +enum dma_transfer_direction {

rte_dma_transfer_direction

> +       DMA_MEM_TO_MEM,
> +       DMA_MEM_TO_DEV,
> +       DMA_DEV_TO_MEM,
> +       DMA_DEV_TO_DEV,
> +};
> +
> +/**
> + * A structure used to configure a DMA virt queue.
> + */
> +struct rte_dmadev_queue_conf {
> +       enum dma_transfer_direction direction;


> +       /**< Associated transfer direction */
> +       uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> +       uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> +       uint64_t dev_flags; /**< Device specific flags */

What is the use of this? It needs more comments.
Since it is in the slow path, we can have non-opaque names here based
on each driver's capabilities.


> +       void *dev_ctx; /**< Device specific context */

What is the use of this? It needs more comments.


Please add a good amount of reserved fields, and add an API to init
this structure for future ABI stability, say
rte_dmadev_queue_config_init() or so.
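Something like this (a rough sketch):

	/* at the tail of struct rte_dmadev_queue_conf */
	uint64_t reserved[8]; /**< Reserved for future use, must be zero */

__rte_experimental
void
rte_dmadev_queue_config_init(struct rte_dmadev_queue_conf *conf);
/**< Zero-fill the structure and set any non-zero defaults */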


> +
> +/**
> + * A structure used to retrieve information of a DMA virt queue.
> + */
> +struct rte_dmadev_queue_info {
> +       enum dma_transfer_direction direction;

A queue may support all directions, so I think it should be a bitfield.
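e.g. (illustrative):

	uint8_t direction;
	/**< Bitmask, e.g. (1 << DMA_MEM_TO_MEM) | (1 << DMA_MEM_TO_DEV) */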

> +       /**< Associated transfer direction */
> +       uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> +       uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> +       uint64_t dev_flags; /**< Device specific flags */
> +};
> +

> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
> +                  const struct dma_scatterlist *sg,
> +                  uint32_t sg_len, uint64_t flags)

I would like to change this to:

rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
                   const struct rte_dma_sg *src, uint32_t nb_src,
                   const struct rte_dma_sg *dst, uint32_t nb_dst)

or so, to allow use cases like a 30 MB src copy being written as
30 x 1 MB dst segments.



> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->copy_sg)(dev, vq_id, sg, sg_len, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the DMA virt queue
> + *
> + * This queues up a fill operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   An opaque flags for this operation.

PLEASE REMOVE the opaque stuff from the fastpath; it will be a pain
for application writers, as they would need to write multiple
combinations of fastpath code. Flags are OK if we have valid generic
flags now to control the transfer behaviour.


> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Add a fence to force ordering between operations
> + *
> + * This adds a fence to a sequence of operations to enforce ordering, such that
> + * all operations enqueued before the fence must be completed before operations
> + * after the fence.
> + * NOTE: Since this fence may be added as a flag to the last operation enqueued,
> + * this API may not function correctly when called immediately after an
> + * "rte_dmadev_perform" call i.e. before any new operations are enqueued.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + *
> + * @return
> + *   - =0: Successful add fence.
> + *   - <0: Failure to add fence.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->fence)(dev, vq_id);
> +}

Since HW submission goes through a queue (FIFO), ordering is always
maintained, right? Could you share more details and a use case for
fence() from a driver/application PoV?


> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + *
> + * @return
> + *   - =0: Successful trigger hardware.
> + *   - <0: Failure to trigger hardware.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->perform)(dev, vq_id);
> +}

Since this scheme adds function call overhead to all applications, I
would like to understand, from a driver/application PoV, what the
benefit of doing it this way is vs. having the enqueue ring the
doorbell implicitly.
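One alternative (purely illustrative; the flag is hypothetical) would
be to fold the doorbell into the last enqueue via a flag:

#define RTE_DMA_OP_FLAG_SUBMIT (1ull << 0) /* hypothetical: ring the doorbell after this op */

	rte_dmadev_copy(dev_id, vq_id, src, dst, length, RTE_DMA_OP_FLAG_SUBMIT);

rte_dmadev_perform() would then only be needed when batching without
such a trailing flag.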


> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successful completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] cookie
> + *   The last completed operation's cookie.
> + * @param[out] has_error
> + *   Indicates if there are transfer error.
> + *
> + * @return
> + *   The number of operations that successful completed.

Typo: "successful completed" -> "successfully completed".

> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
> +                    dma_cookie_t *cookie, bool *has_error)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       has_error = false;
> +       return (*dev->completed)(dev, vq_id, nb_cpls, cookie, has_error);

It may be better to have cookie/ring_idx as third argument.

> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that failed to complete.
> + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param nb_status
> + *   Indicates the size  of status array.
> + * @param[out] status
> + *   The error code of operations that failed to complete.
> + * @param[out] cookie
> + *   The last failed completed operation's cookie.
> + *
> + * @return
> + *   The number of operations that failed to complete.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> +                          const uint16_t nb_status, uint32_t *status,
> +                          dma_cookie_t *cookie)

IMO, it is better to move cookie/ring_idx to the third argument.
Why would it return an array of errors, since it is called after
rte_dmadev_completed() has set has_error? Is it better to change it to:

rte_dmadev_error_status(uint16_t dev_id, uint16_t vq_id,
                        dma_cookie_t *cookie, uint32_t *status)

I also think we may need to define status as a bitmask, enumerate all
the combinations of error codes across drivers, and return an error
string from the driver like the existing rte_flow_error does.

See:
struct rte_flow_error {
        enum rte_flow_error_type type; /**< Cause field and error types. */
        const void *cause; /**< Object responsible for the error. */
        const char *message; /**< Human-readable error message. */
};

> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
> +}
> +
> +struct rte_dmadev_stats {
> +       uint64_t enqueue_fail_count;
> +       /**< Conut of all operations which failed enqueued */
> +       uint64_t enqueued_count;
> +       /**< Count of all operations which successful enqueued */
> +       uint64_t completed_fail_count;
> +       /**< Count of all operations which failed to complete */
> +       uint64_t completed_count;
> +       /**< Count of all operations which successful complete */
> +};

We need a capability API to tell which of these items are
updated/supported by the driver.
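For instance (hypothetical API):

__rte_experimental
uint64_t
rte_dmadev_stats_capa_get(uint16_t dev_id);
/**< Return a bitmask telling which rte_dmadev_stats fields the
 * driver updates
 */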


> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> new file mode 100644
> index 0000000..a3afea2
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -0,0 +1,98 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_CORE_H_
> +#define _RTE_DMADEV_CORE_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device internal header.
> + *
> + * This header contains internal data types. But they are still part of the
> + * public API because they are used by inline public functions.
> + */
> +
> +struct rte_dmadev;
> +
> +typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
> +                                     void *src, void *dst,
> +                                     uint32_t length, uint64_t flags);
> +/**< @internal Function used to enqueue a copy operation. */

To avoid namespace conflicts (as this is a public API), use the rte_ prefix.


> +
> +/**
> + * The data structure associated with each DMA device.
> + */
> +struct rte_dmadev {
> +       /**< Enqueue a copy operation onto the DMA device. */
> +       dmadev_copy_t copy;
> +       /**< Enqueue a scatter list copy operation onto the DMA device. */
> +       dmadev_copy_sg_t copy_sg;
> +       /**< Enqueue a fill operation onto the DMA device. */
> +       dmadev_fill_t fill;
> +       /**< Enqueue a scatter list fill operation onto the DMA device. */
> +       dmadev_fill_sg_t fill_sg;
> +       /**< Add a fence to force ordering between operations. */
> +       dmadev_fence_t fence;
> +       /**< Trigger hardware to begin performing enqueued operations. */
> +       dmadev_perform_t perform;
> +       /**< Returns the number of operations that successful completed. */
> +       dmadev_completed_t completed;
> +       /**< Returns the number of operations that failed to complete. */
> +       dmadev_completed_fails_t completed_fails;

We need to limit the fastpath items to one cache line.

> +
> +       void *dev_private; /**< PMD-specific private data */
> +       const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD */
> +
> +       uint16_t dev_id; /**< Device ID for this instance */
> +       int socket_id; /**< Socket ID where memory is allocated */
> +       struct rte_device *device;
> +       /**< Device info. supplied during device initialization */
> +       const char *driver_name; /**< Driver info. supplied by probing */
> +       char name[RTE_DMADEV_NAME_MAX_LEN]; /**< Device name */
> +
> +       RTE_STD_C11
> +       uint8_t attached : 1; /**< Flag indicating the device is attached */
> +       uint8_t started : 1; /**< Device state: STARTED(1)/STOPPED(0) */

Add a couple of reserved fields for future ABI stability.

> +
> +} __rte_cache_aligned;
> +
> +extern struct rte_dmadev rte_dmadevices[];
> +

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
  2021-07-02 13:59 ` Bruce Richardson
  2021-07-04  9:30 ` Jerin Jacob
@ 2021-07-04 14:57 ` Andrew Rybchenko
  2021-07-06  3:56   ` fengchengwen
  2021-07-04 15:21 ` Matan Azrad
                   ` (26 subsequent siblings)
  29 siblings, 1 reply; 339+ messages in thread
From: Andrew Rybchenko @ 2021-07-04 14:57 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

On 7/2/21 4:18 PM, Chengwen Feng wrote:
> This patch introduces 'dmadevice' which is a generic type of DMA
> device.

"This patch introduces ... " -> "Introduce ..."

> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

[snip]

> diff --git a/MAINTAINERS b/MAINTAINERS
> index 4347555..2019783 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
>  F: app/test/test_rawdev.c
>  F: doc/guides/prog_guide/rawdev.rst
>  
> +Dma device API

Dma -> DMA

> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/
> +
>  
>  Memory Pool Drivers
>  -------------------

[snip]

> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> new file mode 100644
> index 0000000..a94e839
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -0,0 +1,438 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#include <ctype.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <stdint.h>
> +
> +#include <rte_log.h>
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_memory.h>
> +#include <rte_memzone.h>
> +#include <rte_malloc.h>
> +#include <rte_errno.h>
> +#include <rte_string_fns.h>
> +
> +#include "rte_dmadev.h"
> +#include "rte_dmadev_pmd.h"
> +
> +struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
> +
> +uint16_t
> +rte_dmadev_count(void)
> +{
> +	uint16_t count = 0;
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (rte_dmadevices[i].attached)
> +			count++;
> +	}
> +
> +	return count;
> +}
> +
> +int
> +rte_dmadev_get_dev_id(const char *name)
> +{
> +	uint16_t i;
> +
> +	if (name == NULL)
> +		return -EINVAL;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++)
> +		if ((strcmp(rte_dmadevices[i].name, name) == 0) &&
> +		    (rte_dmadevices[i].attached == RTE_DMADEV_ATTACHED))
> +			return i;
> +
> +	return -ENODEV;
> +}
> +
> +int
> +rte_dmadev_socket_id(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	return dev->socket_id;
> +}
> +
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
> +{
> +	struct rte_dmadev *dev;
> +	int diag;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
> +
> +	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
> +	diag = (*dev->dev_ops->dev_info_get)(dev, dev_info);
> +	if (diag != 0)
> +		return diag;
> +
> +	dev_info->device = dev->device;
> +	dev_info->driver_name = dev->driver_name;
> +	dev_info->socket_id = dev->socket_id;
> +
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
> +{
> +	struct rte_dmadev *dev;
> +	int diag;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
> +
> +	if (dev->started) {
> +		RTE_DMADEV_LOG(ERR,
> +		   "device %u must be stopped to allow configuration", dev_id);
> +		return -EBUSY;
> +	}
> +
> +	diag = (*dev->dev_ops->dev_configure)(dev, dev_conf);
> +	if (diag != 0)
> +		RTE_DMADEV_LOG(ERR, "device %u dev_configure failed, ret = %d",
> +			       dev_id, diag);
> +	else
> +		dev->attached = 1;
> +
> +	return diag;
> +}
> +
> +int
> +rte_dmadev_start(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +	int diag;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +	if (dev->started != 0) {
> +		RTE_DMADEV_LOG(ERR, "device %u already started", dev_id);
> +		return 0;
> +	}
> +
> +	if (dev->dev_ops->dev_start == NULL)
> +		goto mark_started;
> +
> +	diag = (*dev->dev_ops->dev_start)(dev);
> +	if (diag != 0)
> +		return diag;
> +
> +mark_started:
> +	dev->started = 1;
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_stop(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +	int diag;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (dev->started == 0) {
> +		RTE_DMADEV_LOG(ERR, "device %u already stopped", dev_id);
> +		return 0;
> +	}
> +
> +	if (dev->dev_ops->dev_stop == NULL)
> +		goto mark_stopped;
> +
> +	diag = (*dev->dev_ops->dev_stop)(dev);
> +	if (diag != 0)
> +		return diag;
> +
> +mark_stopped:
> +	dev->started = 0;
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_close(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
> +
> +	/* Device must be stopped before it can be closed */
> +	if (dev->started == 1) {
> +		RTE_DMADEV_LOG(ERR, "device %u must be stopped before closing",
> +			       dev_id);
> +		return -EBUSY;
> +	}
> +
> +	return (*dev->dev_ops->dev_close)(dev);
> +}
> +
> +int
> +rte_dmadev_reset(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
> +
> +	/* Reset is not dependent on state of the device */
> +	return (*dev->dev_ops->dev_reset)(dev);
> +}
> +
> +int
> +rte_dmadev_queue_setup(uint16_t dev_id,
> +		       const struct rte_dmadev_queue_conf *conf)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(conf, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
> +
> +	return (*dev->dev_ops->queue_setup)(dev, conf);
> +}
> +
> +int
> +rte_dmadev_queue_release(uint16_t dev_id, uint16_t vq_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
> +
> +	return (*dev->dev_ops->queue_release)(dev, vq_id);
> +}
> +
> +int
> +rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
> +			  struct rte_dmadev_queue_info *info)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(info, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_info_get, -ENOTSUP);
> +
> +	memset(info, 0, sizeof(struct rte_dmadev_queue_info));
> +	return (*dev->dev_ops->queue_info_get)(dev, vq_id, info);
> +}
> +
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, int vq_id,
> +		     struct rte_dmadev_stats *stats)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(stats, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
> +
> +	return (*dev->dev_ops->stats_get)(dev, vq_id, stats);
> +}
> +
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, int vq_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
> +
> +	return (*dev->dev_ops->stats_reset)(dev, vq_id);
> +}
> +
> +static int
> +xstats_get_count(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
> +
> +	return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
> +}
> +
> +int
> +rte_dmadev_xstats_names_get(uint16_t dev_id,
> +			    struct rte_dmadev_xstats_name *xstats_names,
> +			    uint32_t size)
> +{
> +	struct rte_dmadev *dev;
> +	int cnt_expected_entries;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	cnt_expected_entries = xstats_get_count(dev_id);
> +
> +	if (xstats_names == NULL || cnt_expected_entries < 0 ||
> +	    (int)size < cnt_expected_entries || size == 0)
> +		return cnt_expected_entries;
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
> +	return (*dev->dev_ops->xstats_get_names)(dev, xstats_names, size);
> +}
> +
> +int
> +rte_dmadev_xstats_get(uint16_t dev_id, const uint32_t ids[],
> +		      uint64_t values[], uint32_t n)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(ids, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(values, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get, -ENOTSUP);
> +
> +	return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
> +}
> +
> +int
> +rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t nb_ids)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_reset, -ENOTSUP);
> +
> +	return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
> +}
> +
> +int
> +rte_dmadev_selftest(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
> +
> +	return (*dev->dev_ops->dev_selftest)(dev_id);
> +}
> +
> +static inline uint16_t
> +rte_dmadev_find_free_device_index(void)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (rte_dmadevices[i].attached == RTE_DMADEV_DETACHED)
> +			return i;
> +	}
> +
> +	return RTE_DMADEV_MAX_DEVS;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
> +{
> +	struct rte_dmadev *dev;
> +	uint16_t dev_id;
> +
> +	if (rte_dmadev_get_dev_id(name) >= 0) {
> +		RTE_DMADEV_LOG(ERR,
> +			"device with name %s already allocated!", name);
> +		return NULL;
> +	}
> +
> +	dev_id = rte_dmadev_find_free_device_index();
> +	if (dev_id == RTE_DMADEV_MAX_DEVS) {
> +		RTE_DMADEV_LOG(ERR, "reached maximum number of DMA devices");
> +		return NULL;
> +	}
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (dev_priv_size > 0) {
> +		dev->dev_private = rte_zmalloc_socket("dmadev private",
> +				     dev_priv_size,
> +				     RTE_CACHE_LINE_SIZE,
> +				     socket_id);
> +		if (dev->dev_private == NULL) {
> +			RTE_DMADEV_LOG(ERR,
> +				"unable to allocate memory for dmadev");
> +			return NULL;
> +		}
> +	}
> +
> +	dev->dev_id = dev_id;
> +	dev->socket_id = socket_id;
> +	dev->started = 0;
> +	strlcpy(dev->name, name, RTE_DMADEV_NAME_MAX_LEN);
> +
> +	dev->attached = RTE_DMADEV_ATTACHED;
> +
> +	return dev;
> +}
> +
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev)
> +{
> +	int ret;
> +
> +	if (dev == NULL)
> +		return -EINVAL;
> +
> +	ret = rte_dmadev_close(dev->dev_id);
> +	if (ret != 0)
> +		return ret;
> +
> +	if (dev->dev_private != NULL)
> +		rte_free(dev->dev_private);
> +
> +	memset(dev, 0, sizeof(struct rte_dmadev));
> +	dev->attached = RTE_DMADEV_DETACHED;
> +
> +	return 0;
> +}
> +
> +RTE_LOG_REGISTER(libdmadev_logtype, lib.dmadev, INFO);
> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..f74fc6a
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,919 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The generic DMA device diagram:
> + *
> + *            ------------     ------------
> + *            | HW-queue |     | HW-queue |
> + *            ------------     ------------
> + *                   \            /
> + *                    \          /
> + *                     \        /
> + *                  ----------------
> + *                  |dma-controller|
> + *                  ----------------
> + *
> + *   The DMA could have multiple HW-queues, each HW-queue could have multiple
> + *   capabilities, e.g. whether to support fill operation, supported DMA
> + *   transfter direction and etc.
> + *
> + * The DMA framework is built on the following abstraction model:
> + *
> + *     ------------    ------------
> + *     |virt-queue|    |virt-queue|
> + *     ------------    ------------

Do we really need "virt" here? "virt queue" could be incorrectly
associated with the virtio spec. IMHO it would be better to either
drop "virt" or use the full word "virtual" everywhere in the
documentation.

> + *            \           /
> + *             \         /
> + *              \       /
> + *            ------------     ------------
> + *            | HW-queue |     | HW-queue |
> + *            ------------     ------------
> + *                   \            /
> + *                    \          /
> + *                     \        /
> + *                     ----------
> + *                     | dmadev |
> + *                     ----------
> + *
> + *   a) The DMA operation request must be submitted to the virt queue, virt
> + *      queues must be created based on HW queues, the DMA device could have
> + *      multiple HW queues.

What defines the mapping of virtual queues to HW queues? Does the
API user see HW queues? If not, it should be kept transparent and HW
queues should simply be removed from the picture.

> + *   b) The virt queues on the same HW-queue could represent different contexts,
> + *      e.g. user could create virt-queue-0 on HW-queue-0 for mem-to-mem
> + *      transfer scenario, and create virt-queue-1 on the same HW-queue for
> + *      mem-to-dev transfer scenario.
> + *   NOTE: user could also create multiple virt queues for mem-to-mem transfer
> + *         scenario as long as the corresponding driver supports.
> + *
> + * The control plane APIs include configure/queue_setup/queue_release/start/
> + * stop/reset/close, in order to start device work, the call sequence must be
> + * as follows:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_queue_setup()
> + *     - rte_dmadev_start()
> + *
> + * The dataplane APIs include two parts:
> + *   a) The first part is the submission of operation requests:
> + *        - rte_dmadev_copy()
> + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> + *        - rte_dmadev_fill()
> + *        - rte_dmadev_fill_sg() - scatter-gather form of fill
> + *        - rte_dmadev_fence()   - add a fence force ordering between operations
> + *        - rte_dmadev_perform() - issue doorbell to hardware
> + *      These APIs could work with different virt queues which have different
> + *      contexts.
> + *      The first four APIs are used to submit the operation request to the virt
> + *      queue, if the submission is successful, a cookie (as type
> + *      'dma_cookie_t') is returned, otherwise a negative number is returned.
> + *   b) The second part is to obtain the result of requests:
> + *        - rte_dmadev_completed()
> + *            - return the number of operation requests completed successfully.
> + *        - rte_dmadev_completed_fails()
> + *            - return the number of operation requests failed to complete.
> + *
> + * The misc APIs include info_get/queue_info_get/stats/xstats/selftest, provide
> + * information query and self-test capabilities.
> + *
> + * About the dataplane APIs MT-safe, there are two dimensions:
> + *   a) For one virt queue, the submit/completion API could be MT-safe,
> + *      e.g. one thread do submit operation, another thread do completion
> + *      operation.
> + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_VQ.
> + *      If driver don't support it, it's up to the application to guarantee
> + *      MT-safe.
> + *   b) For multiple virt queues on the same HW queue, e.g. one thread do
> + *      operation on virt-queue-0, another thread do operation on virt-queue-1.
> + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_MVQ.
> + *      If driver don't support it, it's up to the application to guarantee
> + *      MT-safe.
> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <rte_common.h>
> +#include <rte_memory.h>
> +#include <rte_errno.h>
> +#include <rte_compat.h>
> +
> +/**
> + * dma_cookie_t - an opaque DMA cookie
> + *
> + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's a error
> + * code.
> + * When using cookies, comply with the following rules:
> + * a) Cookies for each virtual queue are independent.
> + * b) For a virt queue, the cookie are monotonically incremented, when it reach
> + *    the INT_MAX, it wraps back to zero.

INT32_MAX

> + * c) The initial cookie of a virt queue is zero, after the device is stopped or
> + *    reset, the virt queue's cookie needs to be reset to zero.
> + * Example:
> + *    step-1: start one dmadev
> + *    step-2: enqueue a copy operation, the cookie return is 0
> + *    step-3: enqueue a copy operation again, the cookie return is 1
> + *    ...
> + *    step-101: stop the dmadev
> + *    step-102: start the dmadev
> + *    step-103: enqueue a copy operation, the cookie return is 0
> + *    ...
> + */
> +typedef int32_t dma_cookie_t;
> +
> +/**
> + * dma_scatterlist - can hold scatter DMA operation request
> + */
> +struct dma_scatterlist {
> +	void *src;
> +	void *dst;
> +	uint32_t length;
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the total number of DMA devices that have been successfully
> + * initialised.
> + *
> + * @return
> + *   The total number of usable DMA devices.
> + */
> +__rte_experimental
> +uint16_t
> +rte_dmadev_count(void);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the device identifier for the named DMA device.
> + *
> + * @param name
> + *   DMA device name to select the DMA device identifier.
> + *
> + * @return
> + *   Returns DMA device identifier on success.
> + *   - <0: Failure to find named DMA device.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_get_dev_id(const char *name);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Return the NUMA socket to which a device is connected.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   The NUMA socket id to which the device is connected or
> + *   a default of zero if the socket could not be determined.
> + *   - -EINVAL: dev_id value is out of range.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_socket_id(uint16_t dev_id);

It should be rte_dmadev_numa_node(), I guess.

> +
> +/**
> + * The capabilities of a DMA device
> + */
> +#define RTE_DMA_DEV_CAPA_M2M	(1ull << 0) /**< Support mem-to-mem transfer */
> +#define RTE_DMA_DEV_CAPA_M2D	(1ull << 1) /**< Support mem-to-dev transfer */
> +#define RTE_DMA_DEV_CAPA_D2M	(1ull << 2) /**< Support dev-to-mem transfer */
> +#define RTE_DMA_DEV_CAPA_D2D	(1ull << 3) /**< Support dev-to-dev transfer */
> +#define RTE_DMA_DEV_CAPA_COPY	(1ull << 4) /**< Support copy ops */
> +#define RTE_DMA_DEV_CAPA_FILL	(1ull << 5) /**< Support fill ops */
> +#define RTE_DMA_DEV_CAPA_SG	(1ull << 6) /**< Support scatter-gather ops */
> +#define RTE_DMA_DEV_CAPA_FENCE	(1ull << 7) /**< Support fence ops */
> +#define RTE_DMA_DEV_CAPA_IOVA	(1ull << 8) /**< Support IOVA as DMA address */
> +#define RTE_DMA_DEV_CAPA_VA	(1ull << 9) /**< Support VA as DMA address */
> +#define RTE_DMA_DEV_CAPA_MT_VQ	(1ull << 10) /**< Support MT-safe of one virt queue */
> +#define RTE_DMA_DEV_CAPA_MT_MVQ	(1ull << 11) /**< Support MT-safe of multiple virt queues */

The above is very hard to read. Values should be aligned in the same
column.
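E.g. (same values, just realigned):

#define RTE_DMA_DEV_CAPA_M2M    (1ull << 0)  /**< Support mem-to-mem transfer */
#define RTE_DMA_DEV_CAPA_M2D    (1ull << 1)  /**< Support mem-to-dev transfer */
#define RTE_DMA_DEV_CAPA_D2M    (1ull << 2)  /**< Support dev-to-mem transfer */
#define RTE_DMA_DEV_CAPA_COPY   (1ull << 4)  /**< Support copy ops */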

> +
> +/**
> + * A structure used to retrieve the contextual information of
> + * an DMA device
> + */
> +struct rte_dmadev_info {
> +	/**
> +	 * Fields filled by framewok
> +	 */

Doxygen has a way to document groups. Please, use it.
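E.g. member groups (a sketch):

/** @name Fields filled by the framework */
/**@{*/
	struct rte_device *device; /**< Generic Device information */
	const char *driver_name; /**< Device driver name */
	int socket_id; /**< Socket ID where memory is allocated */
/**@}*/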

> +	struct rte_device *device; /**< Generic Device information */
> +	const char *driver_name; /**< Device driver name */
> +	int socket_id; /**< Socket ID where memory is allocated */
> +
> +	/**
> +	 * Specification fields filled by driver
> +	 */
> +	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> +	uint16_t max_hw_queues; /**< Maximum number of HW queues. */
> +	uint16_t max_vqs_per_hw_queue;
> +	/**< Maximum number of virt queues to allocate per HW queue */
> +	uint16_t max_desc;
> +	/**< Maximum allowed number of virt queue descriptors */
> +	uint16_t min_desc;
> +	/**< Minimum allowed number of virt queue descriptors */
> +
> +	/**
> +	 * Status fields filled by driver
> +	 */
> +	uint16_t nb_hw_queues; /**< Number of HW queues configured */
> +	uint16_t nb_vqs; /**< Number of virt queues configured */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve the contextual information of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @param[out] dev_info
> + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> + *   contextual information of the device.
> + * @return
> + *   - =0: Success, driver updates the contextual information of the DMA device
> + *   - <0: Error code returned by the driver info get function.
> + *
> + */
> +__rte_experimental
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> +
> +/**
> + * dma_address_type
> + */
> +enum dma_address_type {
> +	DMA_ADDRESS_TYPE_IOVA, /**< Use IOVA as dma address */
> +	DMA_ADDRESS_TYPE_VA, /**< Use VA as dma address */
> +};
> +
> +/**
> + * A structure used to configure a DMA device.
> + */
> +struct rte_dmadev_conf {
> +	enum dma_address_type addr_type; /**< Address type to used */
> +	uint16_t nb_hw_queues; /**< Number of HW-queues enable to use */
> +	uint16_t max_vqs; /**< Maximum number of virt queues to use */

Is it total or per HW queue? Please, clarify in the documentation.

> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Configure a DMA device.
> + *
> + * This function must be invoked first before any other function in the
> + * API. This function can also be re-invoked when a device is in the
> + * stopped state.
> + *
> + * The caller may use rte_dmadev_info_get() to get the capability of each
> + * resources available for this DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device to configure.
> + * @param dev_conf
> + *   The DMA device configuration structure encapsulated into rte_dmadev_conf
> + *   object.
> + *
> + * @return
> + *   - =0: Success, device configured.
> + *   - <0: Error code returned by the driver configuration function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Start a DMA device.
> + *
> + * The device start step is the last one and consists of setting the DMA
> + * to start accepting jobs.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device started.
> + *   - <0: Error code returned by the driver start function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_start(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Stop a DMA device.
> + *
> + * The device can be restarted with a call to rte_dmadev_start()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device stopped.
> + *   - <0: Error code returned by the driver stop function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stop(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Close a DMA device.
> + *
> + * The device cannot be restarted after this call.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *  - =0: Successfully closing device
> + *  - <0: Failure to close device
> + */
> +__rte_experimental
> +int
> +rte_dmadev_close(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset a DMA device.
> + *
> + * This is different from cycle of rte_dmadev_start->rte_dmadev_stop in the
> + * sense similar to hard or soft reset.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Successful reset device.
> + *   - <0: Failure to reset device.
> + *   - (-ENOTSUP): If the device doesn't support this function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_reset(uint16_t dev_id);
> +
> +/**
> + * dma_transfer_direction

Such comments must be avoided.

> + */
> +enum dma_transfer_direction {
> +	DMA_MEM_TO_MEM,
> +	DMA_MEM_TO_DEV,
> +	DMA_DEV_TO_MEM,
> +	DMA_DEV_TO_DEV,
> +};
> +
> +/**
> + * A structure used to configure a DMA virt queue.
> + */
> +struct rte_dmadev_queue_conf {
> +	enum dma_transfer_direction direction;
> +	/**< Associated transfer direction */

Please, put comments before the code if the comment is on a separate
line.
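i.e.:

	/** Associated transfer direction */
	enum dma_transfer_direction direction;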


> +	uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */

How does the caller know which HW queue ID to use? Which one should
be chosen? Maybe the PMD should decide which HW queue to use? The
queue configuration could provide hints to make the right choice:
required capabilities, relation to another virtual queue, etc.

> +	uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> +	uint64_t dev_flags; /**< Device specific flags */
> +	void *dev_ctx; /**< Device specific context */

Device-specific flags and context sound bad and add vendor specifics
to the API. If so, it could be very hard to switch from vendor to
vendor. Do I misunderstand?

> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate and set up a virt queue.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param conf
> + *   The queue configuration structure encapsulated into rte_dmadev_queue_conf
> + *   object.
> + *
> + * @return
> + *   - >=0: Allocate virt queue success, it is virt queue id.
> + *   - <0: Error code returned by the driver queue setup function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_queue_setup(uint16_t dev_id,
> +		       const struct rte_dmadev_queue_conf *conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Release a virt queue.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue which return by queue setup.
> + *
> + * @return
> + *   - =0: Successful release the virt queue.
> + *   - <0: Error code returned by the driver queue release function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_queue_release(uint16_t dev_id, uint16_t vq_id);
> +
> +/**
> + * A structure used to retrieve information of a DMA virt queue.
> + */
> +struct rte_dmadev_queue_info {
> +	enum dma_transfer_direction direction;
> +	/**< Associated transfer direction */

Please, put comments before the code if the comment is on a separate
line, as noted above.

> +	uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> +	uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> +	uint64_t dev_flags; /**< Device specific flags */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve information of a DMA virt queue.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue which return by queue setup.
> + * @param[out] info
> + *   The queue info structure encapsulated into rte_dmadev_queue_info object.
> + *
> + * @return
> + *   - =0: Successful retrieve information.
> + *   - <0: Error code returned by the driver queue release function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
> +			  struct rte_dmadev_queue_info *info);
> +
> +#include "rte_dmadev_core.h"
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the DMA virt queue.
> + *
> + * This queues up a copy operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   An opaque flags for this operation.
> + *
> + * @return
> + *   dma_cookie_t: please refer to the corresponding definition.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, void *src, void *dst,
> +		uint32_t length, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	return (*dev->copy)(dev, vq_id, src, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the DMA virt queue.
> + *
> + * This queues up a scatter list copy operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param sg_len
> + *   The number of scatterlist elements.
> + * @param flags
> + *   An opaque flags for this operation.
> + *
> + * @return
> + *   dma_cookie_t: please refer to the corresponding definition.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
> +		   const struct dma_scatterlist *sg,
> +		   uint32_t sg_len, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	return (*dev->copy_sg)(dev, vq_id, sg, sg_len, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the DMA virt queue
> + *
> + * This queues up a fill operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   An opaque flags for this operation.
> + *
> + * @return
> + *   dma_cookie_t: please refer to the corresponding definition.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
> +		void *dst, uint32_t length, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	return (*dev->fill)(dev, vq_id, pattern, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list fill operation onto the DMA virt queue
> + *
> + * This queues up a scatter list fill operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param sg_len
> + *   The number of scatterlist elements.
> + * @param flags
> + *   An opaque flags for this operation.
> + *
> + * @return
> + *   dma_cookie_t: please refer to the corresponding definition.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
> +		   const struct dma_scatterlist *sg, uint32_t sg_len,
> +		   uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	return (*dev->fill_sg)(dev, vq_id, pattern, sg, sg_len, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Add a fence to force ordering between operations
> + *
> + * This adds a fence to a sequence of operations to enforce ordering, such that
> + * all operations enqueued before the fence must be completed before operations
> + * after the fence.
> + * NOTE: Since this fence may be added as a flag to the last operation enqueued,
> + * this API may not function correctly when called immediately after an
> + * "rte_dmadev_perform" call i.e. before any new operations are enqueued.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + *
> + * @return
> + *   - =0: Successful add fence.
> + *   - <0: Failure to add fence.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	return (*dev->fence)(dev, vq_id);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + *
> + * @return
> + *   - =0: Hardware triggered successfully.
> + *   - <0: Failure to trigger hardware.
> + *
> + * NOTE: The caller must ensure that the input parameters are valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	return (*dev->perform)(dev, vq_id);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] cookie
> + *   The last completed operation's cookie.
> + * @param[out] has_error
> + *   Indicates if a transfer error has occurred.
> + *
> + * @return
> + *   The number of operations that completed successfully.
> + *
> + * NOTE: The caller must ensure that the input parameters are valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
> +		     dma_cookie_t *cookie, bool *has_error)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	*has_error = false;
> +	return (*dev->completed)(dev, vq_id, nb_cpls, cookie, has_error);
> +}
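
To make the dataplane flow above concrete, a minimal polling sketch using only
the APIs in this header (dev_id/vq_id are assumed to identify a started device
and queue; error handling is reduced to a placeholder):

	char dst[256];
	dma_cookie_t cookie, last_done;
	bool has_error = false;
	uint16_t n;

	/* Enqueue one fill of the whole buffer with zeroes. */
	cookie = rte_dmadev_fill(dev_id, vq_id, 0, dst, sizeof(dst), 0);
	if (cookie < 0)
		rte_panic("enqueue failed\n");	/* placeholder handling */

	/* Ring the doorbell so hardware starts the enqueued work. */
	rte_dmadev_perform(dev_id, vq_id);

	/* Poll until the operation identified by the cookie completes. */
	do {
		n = rte_dmadev_completed(dev_id, vq_id, 1, &last_done,
					 &has_error);
	} while (n == 0 && !has_error);
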
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that failed to complete.
> + * NOTE: This API should be used when rte_dmadev_completed() sets has_error.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param nb_status
> + *   Indicates the size of status array.
> + * @param[out] status
> + *   The error code of operations that failed to complete.
> + * @param[out] cookie
> + *   The cookie of the last failed operation.
> + *
> + * @return
> + *   The number of operations that failed to complete.
> + *
> + * NOTE: The caller must ensure that the input parameters are valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> +			   const uint16_t nb_status, uint32_t *status,
> +			   dma_cookie_t *cookie)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
> +}
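
If has_error comes back set from rte_dmadev_completed(), the intended follow-up
looks roughly like this sketch (array size illustrative; RTE_DIM is from
rte_common.h, printf assumes <stdio.h>):

	uint32_t status[8];
	dma_cookie_t last_fail;
	uint16_t i, nb_fail;

	/* Drain up to RTE_DIM(status) failed operations and log them. */
	nb_fail = rte_dmadev_completed_fails(dev_id, vq_id, RTE_DIM(status),
					     status, &last_fail);
	for (i = 0; i < nb_fail; i++)
		printf("failed op, error code %u\n", status[i]);
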
> +
> +struct rte_dmadev_stats {
> +	uint64_t enqueue_fail_count;
> +	/**< Count of all operations which failed to be enqueued */

Please, put comments before the code.

> +	uint64_t enqueued_count;
> +	/**< Count of all operations which were successfully enqueued */
> +	uint64_t completed_fail_count;
> +	/**< Count of all operations which failed to complete */
> +	uint64_t completed_count;
> +	/**< Count of all operations which completed successfully */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve basic statistics of one or all DMA virt queues.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue, -1 means all virt queues.
> + * @param[out] stats
> + *   The basic statistics structure encapsulated into rte_dmadev_stats
> + *   object.
> + *
> + * @return
> + *   - =0: Successfully retrieved stats.
> + *   - <0: Failure to retrieve stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, int vq_id,
> +		     struct rte_dmadev_stats *stats);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset basic statistics of one or all DMA virt queues.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue, -1 means all virt queues.
> + *
> + * @return
> + *   - =0: Successfully reset stats.
> + *   - <0: Failure to reset stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, int vq_id);
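
A short usage sketch for the pair above (vq_id of -1 selects all virt queues,
per the contract; assumes <stdio.h> and <inttypes.h>):

	struct rte_dmadev_stats stats;

	/* Fetch, print and then clear the basic counters of all virt queues. */
	if (rte_dmadev_stats_get(dev_id, -1, &stats) == 0) {
		printf("enqueued %" PRIu64 ", completed %" PRIu64 "\n",
		       stats.enqueued_count, stats.completed_count);
		rte_dmadev_stats_reset(dev_id, -1);
	}
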
> +
> +/** Maximum name length for extended statistics counters */
> +#define RTE_DMA_DEV_XSTATS_NAME_SIZE 64
> +
> +/**
> + * A name-key lookup element for extended statistics.
> + *
> + * This structure is used to map between names and ID numbers
> + * for extended dmadev statistics.
> + */
> +struct rte_dmadev_xstats_name {
> +	char name[RTE_DMA_DEV_XSTATS_NAME_SIZE];
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve names of extended statistics of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param[out] xstats_names
> + *   Block of memory to insert names into. Must have capacity for at least
> + *   *size* entries. If set to NULL, the function returns the required capacity.
> + * @param size
> + *   Capacity of xstats_names (number of names).
> + * @return
> + *   - positive value lower or equal to size: success. The return value
> + *     is the number of entries filled in the stats table.
> + *   - positive value higher than size: error, the given statistics table
> + *     is too small. The return value corresponds to the size that should
> + *     be given to succeed. The entries in the table are not valid and
> + *     shall not be used by the caller.
> + *   - negative value on error.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_xstats_names_get(uint16_t dev_id,
> +			    struct rte_dmadev_xstats_name *xstats_names,
> +			    uint32_t size);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve extended statistics of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param ids
> + *   The id numbers of the stats to get. The ids can be obtained from the
> + *   stat position in the stat list from rte_dmadev_xstats_names_get().
> + * @param[out] values
> + *   The values for each stat requested by ID.
> + * @param n
> + *   The number of stats requested.
> + *
> + * @return
> + *   - positive value: number of stat entries filled into the values array.
> + *   - negative value on error.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_xstats_get(uint16_t dev_id, const uint32_t ids[],
> +		      uint64_t values[], uint32_t n);
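
The contract above implies the usual two-call pattern: size the name table with
a NULL first call, then fetch names and values. A sketch, with allocation
checks trimmed (assumes <stdlib.h>):

	int nb = rte_dmadev_xstats_names_get(dev_id, NULL, 0);
	struct rte_dmadev_xstats_name *names = malloc(nb * sizeof(*names));
	uint32_t *ids = malloc(nb * sizeof(*ids));
	uint64_t *values = malloc(nb * sizeof(*values));
	int i;

	rte_dmadev_xstats_names_get(dev_id, names, nb);
	for (i = 0; i < nb; i++)
		ids[i] = i;	/* ids are positions in the name list */
	rte_dmadev_xstats_get(dev_id, ids, values, nb);
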
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset the values of the xstats of the selected component in the device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param ids
> + *   Selects specific statistics to be reset. When NULL, all statistics
> + *   will be reset. If non-NULL, must point to array of at least
> + *   *nb_ids* size.
> + * @param nb_ids
> + *   The number of ids available from the *ids* array. Ignored when ids is NULL.
> + *
> + * @return
> + *   - zero: successfully reset the statistics to zero.
> + *   - negative value on error.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t nb_ids);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger the dmadev self test.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - 0: Selftest successful.
> + *   - -ENOTSUP if the device doesn't support selftest
> + *   - other values < 0 on failure.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_selftest(uint16_t dev_id);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_H_ */
> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> new file mode 100644
> index 0000000..a3afea2
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -0,0 +1,98 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_CORE_H_
> +#define _RTE_DMADEV_CORE_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device internal header.
> + *
> + * This header contains internal data types. But they are still part of the
> + * public API because they are used by inline public functions.

Do we really want it? Anyway rte_dmadev must not be here.
Some sub-structure could be, but not the entire rte_dmadev.

> + */
> +
> +struct rte_dmadev;
> +
> +typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
> +				      void *src, void *dst,
> +				      uint32_t length, uint64_t flags);
> +/**< @internal Function used to enqueue a copy operation. */

Avoid comments after the code on a separate line. Move them to be
before the code.

> +
> +typedef dma_cookie_t (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
> +					 const struct dma_scatterlist *sg,
> +					 uint32_t sg_len, uint64_t flags);
> +/**< @internal Function used to enqueue a scatter list copy operation. */
> +
> +typedef dma_cookie_t (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vq_id,
> +				      uint64_t pattern, void *dst,
> +				      uint32_t length, uint64_t flags);
> +/**< @internal Function used to enqueue a fill operation. */
> +
> +typedef dma_cookie_t (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
> +			uint64_t pattern, const struct dma_scatterlist *sg,
> +			uint32_t sg_len, uint64_t flags);
> +/**< @internal Function used to enqueue a scatter list fill operation. */
> +
> +typedef int (*dmadev_fence_t)(struct rte_dmadev *dev, uint16_t vq_id);
> +/**< @internal Function used to add a fence ordering between operations. */
> +
> +typedef int (*dmadev_perform_t)(struct rte_dmadev *dev, uint16_t vq_id);
> +/**< @internal Function used to trigger hardware to begin performing. */
> +
> +typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vq_id,
> +				       const uint16_t nb_cpls,
> +				       dma_cookie_t *cookie, bool *has_error);
> +/**< @internal Function used to return number of successfully completed operations */
> +
> +typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
> +			uint16_t vq_id, const uint16_t nb_status,
> +			uint32_t *status, dma_cookie_t *cookie);
> +/**< @internal Function used to return number of operations that failed to complete */
> +
> +#define RTE_DMADEV_NAME_MAX_LEN	64 /**< Max length of name of DMA PMD */
> +
> +struct rte_dmadev_ops;
> +
> +/**
> + * The data structure associated with each DMA device.
> + */
> +struct rte_dmadev {
> +	/**< Enqueue a copy operation onto the DMA device. */

Comment before code should start from /** (not /**< ).

> +	dmadev_copy_t copy;
> +	/**< Enqueue a scatter list copy operation onto the DMA device. */
> +	dmadev_copy_sg_t copy_sg;
> +	/**< Enqueue a fill operation onto the DMA device. */
> +	dmadev_fill_t fill;
> +	/**< Enqueue a scatter list fill operation onto the DMA device. */
> +	dmadev_fill_sg_t fill_sg;
> +	/**< Add a fence to force ordering between operations. */
> +	dmadev_fence_t fence;
> +	/**< Trigger hardware to begin performing enqueued operations. */
> +	dmadev_perform_t perform;
> +	/**< Returns the number of operations that successfully completed. */
> +	dmadev_completed_t completed;
> +	/**< Returns the number of operations that failed to complete. */
> +	dmadev_completed_fails_t completed_fails;
> +
> +	void *dev_private; /**< PMD-specific private data */
> +	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD */
> +
> +	uint16_t dev_id; /**< Device ID for this instance */
> +	int socket_id; /**< Socket ID where memory is allocated */
> +	struct rte_device *device;
> +	/**< Device info. supplied during device initialization */

Please, put comments before the code if comment is in a
separate line.

> +	const char *driver_name; /**< Driver info. supplied by probing */
> +	char name[RTE_DMADEV_NAME_MAX_LEN]; /**< Device name */
> +
> +	RTE_STD_C11
> +	uint8_t attached : 1; /**< Flag indicating the device is attached */
> +	uint8_t started : 1; /**< Device state: STARTED(1)/STOPPED(0) */
> +
> +} __rte_cache_aligned;
> +
> +extern struct rte_dmadev rte_dmadevices[];
> +
> +#endif /* _RTE_DMADEV_CORE_H_ */
> diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h

Let's remove rte_ prefix from DPDK internal headers.

> new file mode 100644
> index 0000000..ef03cf7
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_pmd.h
> @@ -0,0 +1,210 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_PMD_H_
> +#define _RTE_DMADEV_PMD_H_
> +
> +/** @file
> + * RTE DMA PMD APIs
> + *
> + * @note
> + * Driver facing APIs for a DMA device. These are not to be called directly by
> + * any application.
> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <string.h>
> +
> +#include <rte_dev.h>
> +#include <rte_log.h>
> +#include <rte_common.h>
> +
> +#include "rte_dmadev.h"
> +
> +extern int libdmadev_logtype;
> +
> +#define RTE_DMADEV_LOG(level, fmt, args...) \

Do we need RTE_ prefix for internal API?

> +	rte_log(RTE_LOG_ ## level, libdmadev_logtype, "%s(): " fmt "\n", \
> +		__func__, ##args)
> +
> +/* Macros to check for valid device */
> +#define RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
> +	if (!rte_dmadev_pmd_is_valid_dev((dev_id))) { \
> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%d", dev_id); \
> +		return retval; \
> +	} \
> +} while (0)
> +
> +#define RTE_DMADEV_VALID_DEVID_OR_RET(dev_id) do { \
> +	if (!rte_dmadev_pmd_is_valid_dev((dev_id))) { \
> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%d", dev_id); \
> +		return; \
> +	} \
> +} while (0)
> +
> +#define RTE_DMADEV_DETACHED  0
> +#define RTE_DMADEV_ATTACHED  1

Do we really need RTE_ prefix for internal defines?

> +
> +/**
> + * Validate if the DMA device index is a valid attached DMA device.
> + *
> + * @param dev_id
> + *   DMA device index.
> + *
> + * @return
> + *   - 1 if the device index is valid, 0 otherwise.
> + */
> +static inline unsigned

'unsigned int', but it sounds like the function should return
'bool'.

> +rte_dmadev_pmd_is_valid_dev(uint16_t dev_id)

Again, do we really need rte_ prefix for internal functions?

> +{
> +	struct rte_dmadev *dev;
> +
> +	if (dev_id >= RTE_DMADEV_MAX_DEVS)
> +		return 0;
> +
> +	dev = &rte_dmadevices[dev_id];
> +	if (dev->attached != RTE_DMADEV_ATTACHED)
> +		return 0;
> +	else
> +		return 1;
> +}
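
For illustration, the bool variant suggested above could look like this (the
unprefixed name is only a suggestion; assumes <stdbool.h>):

static inline bool
dmadev_is_valid_dev(uint16_t dev_id)
{
	/* Valid only if in range and marked attached. */
	return dev_id < RTE_DMADEV_MAX_DEVS &&
	       rte_dmadevices[dev_id].attached == RTE_DMADEV_ATTACHED;
}
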
> +
> +/**
> + * Definitions of control-plane functions exported by a driver through the
> + * generic structure of type *rte_dmadev_ops* supplied in the *rte_dmadev*
> + * structure associated with a device.
> + */
> +
> +typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
> +				 struct rte_dmadev_info *dev_info);
> +/**< @internal Function used to get device information of a device. */

Let's not use documentation after the code on a separate line in
new code.

> +
> +typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
> +				  const struct rte_dmadev_conf *dev_conf);
> +/**< @internal Function used to configure a device. */
> +
> +typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
> +/**< @internal Function used to start a configured device. */
> +
> +typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
> +/**< @internal Function used to stop a configured device. */
> +
> +typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
> +/**< @internal Function used to close a configured device. */
> +
> +typedef int (*dmadev_reset_t)(struct rte_dmadev *dev);
> +/**< @internal Function used to reset a configured device. */
> +
> +typedef int (*dmadev_queue_setup_t)(struct rte_dmadev *dev,
> +				    const struct rte_dmadev_queue_conf *conf);
> +/**< @internal Function used to allocate and set up a virt queue. */
> +
> +typedef int (*dmadev_queue_release_t)(struct rte_dmadev *dev, uint16_t vq_id);
> +/**< @internal Function used to release a virt queue. */
> +
> +typedef int (*dmadev_queue_info_t)(struct rte_dmadev *dev, uint16_t vq_id,
> +				   struct rte_dmadev_queue_info *info);
> +/**< @internal Function used to retrieve information of a virt queue. */
> +
> +typedef int (*dmadev_stats_get_t)(struct rte_dmadev *dev, int vq_id,
> +				  struct rte_dmadev_stats *stats);
> +/**< @internal Function used to retrieve basic statistics. */
> +
> +typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, int vq_id);
> +/**< @internal Function used to reset basic statistics. */
> +
> +typedef int (*dmadev_xstats_get_names_t)(const struct rte_dmadev *dev,
> +		struct rte_dmadev_xstats_name *xstats_names,
> +		uint32_t size);
> +/**< @internal Function used to get names of extended stats. */
> +
> +typedef int (*dmadev_xstats_get_t)(const struct rte_dmadev *dev,
> +		const uint32_t ids[], uint64_t values[], uint32_t n);
> +/**< @internal Function used to retrieve extended stats. */
> +
> +typedef int (*dmadev_xstats_reset_t)(struct rte_dmadev *dev,
> +				     const uint32_t ids[], uint32_t nb_ids);
> +/**< @internal Function used to reset extended stats. */

Do we really need both stats and xstats from the very
beginning? I think it is better to start with just
generic stats and add xstats when it is really required.

> +
> +typedef int (*dmadev_selftest_t)(uint16_t dev_id);
> +/**< @internal Function used to start dmadev selftest. */
> +
> +/** DMA device operations function pointer table */
> +struct rte_dmadev_ops {

Do we need rte_ prefix for internal data types?

> +	/**< Get device info. */
> +	dmadev_info_get_t dev_info_get;
> +	/**< Configure device. */
> +	dmadev_configure_t dev_configure;
> +	/**< Start device. */
> +	dmadev_start_t dev_start;
> +	/**< Stop device. */
> +	dmadev_stop_t dev_stop;
> +	/**< Close device. */
> +	dmadev_close_t dev_close;
> +	/**< Reset device. */
> +	dmadev_reset_t dev_reset;
> +
> +	/**< Allocate and set up a virt queue. */
> +	dmadev_queue_setup_t queue_setup;
> +	/**< Release a virt queue. */
> +	dmadev_queue_release_t queue_release;
> +	/**< Retrieve information of a virt queue */
> +	dmadev_queue_info_t queue_info_get;
> +
> +	/**< Get basic statistics. */
> +	dmadev_stats_get_t stats_get;
> +	/**< Reset basic statistics. */
> +	dmadev_stats_reset_t stats_reset;
> +	/**< Get names of extended stats. */
> +	dmadev_xstats_get_names_t xstats_get_names;
> +	/**< Get extended statistics. */
> +	dmadev_xstats_get_t xstats_get;
> +	/**< Reset extended statistics values. */
> +	dmadev_xstats_reset_t xstats_reset;
> +
> +	/**< Device selftest function */
> +	dmadev_selftest_t dev_selftest;
> +};
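
For context, a driver would typically fill this table like so (a sketch; the
skeleton_* callbacks are hypothetical and only some ops are shown):

static const struct rte_dmadev_ops skeleton_ops = {
	.dev_info_get = skeleton_info_get,
	.dev_configure = skeleton_configure,
	.dev_start = skeleton_start,
	.dev_stop = skeleton_stop,
	.dev_close = skeleton_close,
	.queue_setup = skeleton_queue_setup,
	.queue_release = skeleton_queue_release,
	.stats_get = skeleton_stats_get,
};
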
> +
> +/**
> + * Allocates a new dmadev slot for a DMA device and returns the pointer
> + * to that slot for the driver to use.
> + *
> + * @param name
> + *   Unique identifier name for each device
> + * @param dev_private_size
> + *   Size of private data memory allocated within rte_dmadev object.
> + *   Set to 0 to disable internal memory allocation and allow for
> + *   self-allocation.
> + * @param socket_id
> + *   Socket to allocate resources on.

It should be numa_node. See recent mails from Thomas M.

> + *
> + * @return
> + *   - NULL: Failure to allocate
> + *   - Other: The rte_dmadev structure pointer for the new device
> + */
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name, size_t dev_private_size,
> +			int socket_id);
> +
> +/**
> + * Release the specified dmadev device.

"dmadev device" sounds strange. May be "DMA device"?

> + *
> + * @param dev
> + *   The *dmadev* pointer is the address of the *rte_dmadev* structure.
> + *
> + * @return
> + *   - 0 on success, negative on error
> + */
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev);
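
A minimal probe/remove sketch using the two helpers above (struct
skeleton_private and skeleton_ops are hypothetical; includes omitted):

static int
skeleton_probe(struct rte_device *rte_dev)
{
	struct rte_dmadev *dev;

	/* Reserve a dmadev slot with driver-private data on the device's node. */
	dev = rte_dmadev_pmd_allocate(rte_dev->name,
				      sizeof(struct skeleton_private),
				      rte_dev->numa_node);
	if (dev == NULL)
		return -ENOMEM;

	dev->dev_ops = &skeleton_ops;
	dev->device = rte_dev;
	return 0;
}

static int
skeleton_remove(struct rte_device *rte_dev)
{
	int dev_id = rte_dmadev_get_dev_id(rte_dev->name);

	if (dev_id < 0)
		return dev_id;
	return rte_dmadev_pmd_release(&rte_dmadevices[dev_id]);
}
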
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_PMD_H_ */

[snip]

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (2 preceding siblings ...)
  2021-07-04 14:57 ` Andrew Rybchenko
@ 2021-07-04 15:21 ` Matan Azrad
  2021-07-06  6:25   ` fengchengwen
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
                   ` (25 subsequent siblings)
  29 siblings, 1 reply; 339+ messages in thread
From: Matan Azrad @ 2021-07-04 15:21 UTC (permalink / raw)
  To: Chengwen Feng, NBU-Contact-Thomas Monjalon, ferruh.yigit,
	bruce.richardson, jerinj, jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma



From: Chengwen Feng
> This patch introduces 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
Did you consider RTE_COMP_ALGO_NULL xform in compressdev library?

> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
>  MAINTAINERS                  |   4 +
>  config/rte_config.h          |   3 +
>  lib/dmadev/meson.build       |   6 +
>  lib/dmadev/rte_dmadev.c      | 438 +++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h      | 919
> +++++++++++++++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h |  98 +++++
>  lib/dmadev/rte_dmadev_pmd.h  | 210 ++++++++++
>  lib/dmadev/version.map       |  32 ++
>  lib/meson.build              |   1 +
>  9 files changed, 1711 insertions(+)
>  create mode 100644 lib/dmadev/meson.build
>  create mode 100644 lib/dmadev/rte_dmadev.c
>  create mode 100644 lib/dmadev/rte_dmadev.h
>  create mode 100644 lib/dmadev/rte_dmadev_core.h
>  create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>  create mode 100644 lib/dmadev/version.map
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 4347555..2019783 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
>  F: app/test/test_rawdev.c
>  F: doc/guides/prog_guide/rawdev.rst
> 
> +Dma device API
> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/
> +
> 
>  Memory Pool Drivers
>  -------------------
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 590903c..331a431 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -81,6 +81,9 @@
>  /* rawdev defines */
>  #define RTE_RAWDEV_MAX_DEVS 64
> 
> +/* dmadev defines */
> +#define RTE_DMADEV_MAX_DEVS 64
> +
>  /* ip_fragmentation defines */
>  #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
>  #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
> diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
> new file mode 100644
> index 0000000..c918dae
> --- /dev/null
> +++ b/lib/dmadev/meson.build
> @@ -0,0 +1,6 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2021 HiSilicon Limited.
> +
> +sources = files('rte_dmadev.c')
> +headers = files('rte_dmadev.h', 'rte_dmadev_pmd.h')
> +indirect_headers += files('rte_dmadev_core.h')
> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> new file mode 100644
> index 0000000..a94e839
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -0,0 +1,438 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#include <ctype.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <stdint.h>
> +
> +#include <rte_log.h>
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_memory.h>
> +#include <rte_memzone.h>
> +#include <rte_malloc.h>
> +#include <rte_errno.h>
> +#include <rte_string_fns.h>
> +
> +#include "rte_dmadev.h"
> +#include "rte_dmadev_pmd.h"
> +
> +struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
> +
> +uint16_t
> +rte_dmadev_count(void)
> +{
> +       uint16_t count = 0;
> +       uint16_t i;
> +
> +       for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +               if (rte_dmadevices[i].attached)
> +                       count++;
> +       }
> +
> +       return count;
> +}
> +
> +int
> +rte_dmadev_get_dev_id(const char *name)
> +{
> +       uint16_t i;
> +
> +       if (name == NULL)
> +               return -EINVAL;
> +
> +       for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++)
> +               if ((strcmp(rte_dmadevices[i].name, name) == 0) &&
> +                   (rte_dmadevices[i].attached == RTE_DMADEV_ATTACHED))
> +                       return i;
> +
> +       return -ENODEV;
> +}
> +
> +int
> +rte_dmadev_socket_id(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +       dev = &rte_dmadevices[dev_id];
> +
> +       return dev->socket_id;
> +}
> +
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
> +{
> +       struct rte_dmadev *dev;
> +       int diag;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
> +
> +       memset(dev_info, 0, sizeof(struct rte_dmadev_info));
> +       diag = (*dev->dev_ops->dev_info_get)(dev, dev_info);
> +       if (diag != 0)
> +               return diag;
> +
> +       dev_info->device = dev->device;
> +       dev_info->driver_name = dev->driver_name;
> +       dev_info->socket_id = dev->socket_id;
> +
> +       return 0;
> +}
> +
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
> +{
> +       struct rte_dmadev *dev;
> +       int diag;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
> +
> +       if (dev->started) {
> +               RTE_DMADEV_LOG(ERR,
> +                  "device %u must be stopped to allow configuration", dev_id);
> +               return -EBUSY;
> +       }
> +
> +       diag = (*dev->dev_ops->dev_configure)(dev, dev_conf);
> +       if (diag != 0)
> +               RTE_DMADEV_LOG(ERR, "device %u dev_configure failed, ret = %d",
> +                              dev_id, diag);
> +       else
> +               dev->attached = 1;
> +
> +       return diag;
> +}
> +
> +int
> +rte_dmadev_start(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev;
> +       int diag;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +       if (dev->started != 0) {
> +               RTE_DMADEV_LOG(ERR, "device %u already started", dev_id);
> +               return 0;
> +       }
> +
> +       if (dev->dev_ops->dev_start == NULL)
> +               goto mark_started;
> +
> +       diag = (*dev->dev_ops->dev_start)(dev);
> +       if (diag != 0)
> +               return diag;
> +
> +mark_started:
> +       dev->started = 1;
> +       return 0;
> +}
> +
> +int
> +rte_dmadev_stop(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev;
> +       int diag;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       if (dev->started == 0) {
> +               RTE_DMADEV_LOG(ERR, "device %u already stopped", dev_id);
> +               return 0;
> +       }
> +
> +       if (dev->dev_ops->dev_stop == NULL)
> +               goto mark_stopped;
> +
> +       diag = (*dev->dev_ops->dev_stop)(dev);
> +       if (diag != 0)
> +               return diag;
> +
> +mark_stopped:
> +       dev->started = 0;
> +       return 0;
> +}
> +
> +int
> +rte_dmadev_close(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
> +
> +       /* Device must be stopped before it can be closed */
> +       if (dev->started == 1) {
> +               RTE_DMADEV_LOG(ERR, "device %u must be stopped before closing",
> +                              dev_id);
> +               return -EBUSY;
> +       }
> +
> +       return (*dev->dev_ops->dev_close)(dev);
> +}
> +
> +int
> +rte_dmadev_reset(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
> +
> +       /* Reset is not dependent on state of the device */
> +       return (*dev->dev_ops->dev_reset)(dev);
> +}
> +
> +int
> +rte_dmadev_queue_setup(uint16_t dev_id,
> +                      const struct rte_dmadev_queue_conf *conf)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(conf, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
> +
> +       return (*dev->dev_ops->queue_setup)(dev, conf);
> +}
> +
> +int
> +rte_dmadev_queue_release(uint16_t dev_id, uint16_t vq_id)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
> +
> +       return (*dev->dev_ops->queue_release)(dev, vq_id);
> +}
> +
> +int
> +rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
> +                         struct rte_dmadev_queue_info *info)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(info, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_info_get, -ENOTSUP);
> +
> +       memset(info, 0, sizeof(struct rte_dmadev_queue_info));
> +       return (*dev->dev_ops->queue_info_get)(dev, vq_id, info);
> +}
> +
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, int vq_id,
> +                    struct rte_dmadev_stats *stats)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(stats, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
> +
> +       return (*dev->dev_ops->stats_get)(dev, vq_id, stats);
> +}
> +
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, int vq_id)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
> +
> +       return (*dev->dev_ops->stats_reset)(dev, vq_id);
> +}
> +
> +static int
> +xstats_get_count(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
> +
> +       return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
> +}
> +
> +int
> +rte_dmadev_xstats_names_get(uint16_t dev_id,
> +                           struct rte_dmadev_xstats_name *xstats_names,
> +                           uint32_t size)
> +{
> +       struct rte_dmadev *dev;
> +       int cnt_expected_entries;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       cnt_expected_entries = xstats_get_count(dev_id);
> +
> +       if (xstats_names == NULL || cnt_expected_entries < 0 ||
> +           (int)size < cnt_expected_entries || size == 0)
> +               return cnt_expected_entries;
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
> +       return (*dev->dev_ops->xstats_get_names)(dev, xstats_names, size);
> +}
> +
> +int
> +rte_dmadev_xstats_get(uint16_t dev_id, const uint32_t ids[],
> +                     uint64_t values[], uint32_t n)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(ids, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(values, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get, -ENOTSUP);
> +
> +       return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
> +}
> +
> +int
> +rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t nb_ids)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_reset, -ENOTSUP);
> +
> +       return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
> +}
> +
> +int
> +rte_dmadev_selftest(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev;
> +
> +       RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
> +
> +       return (*dev->dev_ops->dev_selftest)(dev_id);
> +}
> +
> +static inline uint16_t
> +rte_dmadev_find_free_device_index(void)
> +{
> +       uint16_t i;
> +
> +       for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +               if (rte_dmadevices[i].attached == RTE_DMADEV_DETACHED)
> +                       return i;
> +       }
> +
> +       return RTE_DMADEV_MAX_DEVS;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
> +{
> +       struct rte_dmadev *dev;
> +       uint16_t dev_id;
> +
> +       if (rte_dmadev_get_dev_id(name) >= 0) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "device with name %s already allocated!", name);
> +               return NULL;
> +       }
> +
> +       dev_id = rte_dmadev_find_free_device_index();
> +       if (dev_id == RTE_DMADEV_MAX_DEVS) {
> +               RTE_DMADEV_LOG(ERR, "reached maximum number of DMA devices");
> +               return NULL;
> +       }
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       if (dev_priv_size > 0) {
> +               dev->dev_private = rte_zmalloc_socket("dmadev private",
> +                                    dev_priv_size,
> +                                    RTE_CACHE_LINE_SIZE,
> +                                    socket_id);
> +               if (dev->dev_private == NULL) {
> +                       RTE_DMADEV_LOG(ERR,
> +                               "unable to allocate memory for dmadev");
> +                       return NULL;
> +               }
> +       }
> +
> +       dev->dev_id = dev_id;
> +       dev->socket_id = socket_id;
> +       dev->started = 0;
> +       strlcpy(dev->name, name, RTE_DMADEV_NAME_MAX_LEN);
> +
> +       dev->attached = RTE_DMADEV_ATTACHED;
> +
> +       return dev;
> +}
> +
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev)
> +{
> +       int ret;
> +
> +       if (dev == NULL)
> +               return -EINVAL;
> +
> +       ret = rte_dmadev_close(dev->dev_id);
> +       if (ret != 0)
> +               return ret;
> +
> +       if (dev->dev_private != NULL)
> +               rte_free(dev->dev_private);
> +
> +       memset(dev, 0, sizeof(struct rte_dmadev));
> +       dev->attached = RTE_DMADEV_DETACHED;
> +
> +       return 0;
> +}
> +
> +RTE_LOG_REGISTER(libdmadev_logtype, lib.dmadev, INFO);
> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..f74fc6a
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,919 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The generic DMA device diagram:
> + *
> + *            ------------     ------------
> + *            | HW-queue |     | HW-queue |
> + *            ------------     ------------
> + *                   \            /
> + *                    \          /
> + *                     \        /
> + *                  ----------------
> + *                  |dma-controller|
> + *                  ----------------
> + *
> + *   The DMA could have multiple HW-queues, each HW-queue could have
> + *   multiple capabilities, e.g. whether to support fill operations,
> + *   supported DMA transfer directions, etc.
> + *
> + * The DMA framework is built on the following abstraction model:
> + *
> + *     ------------    ------------
> + *     |virt-queue|    |virt-queue|
> + *     ------------    ------------
> + *            \           /
> + *             \         /
> + *              \       /
> + *            ------------     ------------
> + *            | HW-queue |     | HW-queue |
> + *            ------------     ------------
> + *                   \            /
> + *                    \          /
> + *                     \        /
> + *                     ----------
> + *                     | dmadev |
> + *                     ----------
> + *
> + *   a) The DMA operation request must be submitted to the virt queue, virt
> + *      queues must be created based on HW queues, the DMA device could
> + *      have multiple HW queues.
> + *   b) The virt queues on the same HW-queue could represent different contexts,
> + *      e.g. user could create virt-queue-0 on HW-queue-0 for mem-to-mem
> + *      transfer scenario, and create virt-queue-1 on the same HW-queue for
> + *      mem-to-dev transfer scenario.
> + *   NOTE: user could also create multiple virt queues for the mem-to-mem
> + *         transfer scenario as long as the corresponding driver supports it.
> + *
> + * The control plane APIs include configure/queue_setup/queue_release/
> + * start/stop/reset/close; in order to start device work, the call sequence
> + * must be as follows:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_queue_setup()
> + *     - rte_dmadev_start()
> + *
> + * The dataplane APIs include two parts:
> + *   a) The first part is the submission of operation requests:
> + *        - rte_dmadev_copy()
> + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> + *        - rte_dmadev_fill()
> + *        - rte_dmadev_fill_sg() - scatter-gather form of fill
> + *        - rte_dmadev_fence()   - add a fence to force ordering between operations
> + *        - rte_dmadev_perform() - issue doorbell to hardware
> + *      These APIs could work with different virt queues which have different
> + *      contexts.
> + *      The first four APIs are used to submit the operation request to the virt
> + *      queue, if the submission is successful, a cookie (as type
> + *      'dma_cookie_t') is returned, otherwise a negative number is returned.
> + *   b) The second part is to obtain the result of requests:
> + *        - rte_dmadev_completed()
> + *            - return the number of operation requests completed successfully.
> + *        - rte_dmadev_completed_fails()
> + *            - return the number of operation requests failed to complete.
> + *
> + * The misc APIs include info_get/queue_info_get/stats/xstats/selftest,
> + * providing information query and self-test capabilities.
> + *
> + * About MT-safety of the dataplane APIs, there are two dimensions:
> + *   a) For one virt queue, the submit/completion API could be MT-safe,
> + *      e.g. one thread does submit operations, another thread does
> + *      completion operations.
> + *      If the driver supports it, then declare RTE_DMA_DEV_CAPA_MT_VQ.
> + *      If the driver doesn't support it, it's up to the application to
> + *      guarantee MT-safety.
> + *   b) For multiple virt queues on the same HW queue, e.g. one thread does
> + *      operations on virt-queue-0, another thread does operations on
> + *      virt-queue-1.
> + *      If the driver supports it, then declare RTE_DMA_DEV_CAPA_MT_MVQ.
> + *      If the driver doesn't support it, it's up to the application to
> + *      guarantee MT-safety.
> + */
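
To make the control-plane sequence above concrete, a bring-up sketch (field
values are illustrative; the conf structs are defined further down in this
patch, and dev_id is assumed valid):

	struct rte_dmadev_conf conf = {
		.addr_type = DMA_ADDRESS_TYPE_VA,
		.nb_hw_queues = 1,
		.max_vqs = 1,
	};
	struct rte_dmadev_queue_conf qconf = {
		.direction = DMA_MEM_TO_MEM,
		.hw_queue_id = 0,
		.nb_desc = 128,
	};
	int vq_id;

	rte_dmadev_configure(dev_id, &conf);
	vq_id = rte_dmadev_queue_setup(dev_id, &qconf); /* returns virt queue id */
	rte_dmadev_start(dev_id);
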
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <rte_common.h>
> +#include <rte_memory.h>
> +#include <rte_errno.h>
> +#include <rte_compat.h>
> +
> +/**
> + * dma_cookie_t - an opaque DMA cookie
> + *
> + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's an
> + * error code.
> + * When using cookies, comply with the following rules:
> + * a) Cookies for each virtual queue are independent.
> + * b) For a virt queue, the cookie is monotonically incremented; when it
> + *    reaches INT_MAX, it wraps back to zero.
> + * c) The initial cookie of a virt queue is zero, after the device is stopped or
> + *    reset, the virt queue's cookie needs to be reset to zero.
> + * Example:
> + *    step-1: start one dmadev
> + *    step-2: enqueue a copy operation, the cookie return is 0
> + *    step-3: enqueue a copy operation again, the cookie return is 1
> + *    ...
> + *    step-101: stop the dmadev
> + *    step-102: start the dmadev
> + *    step-103: enqueue a copy operation, the cookie return is 0
> + *    ...
> + */
> +typedef int32_t dma_cookie_t;
> +
> +/**
> + * dma_scatterlist - holds a scatter DMA operation request
> + */
> +struct dma_scatterlist {
> +       void *src;
> +       void *dst;
> +       uint32_t length;
> +};
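
A short sketch of a two-element scatter copy using this struct (buffers and
lengths are illustrative; rte_dmadev_copy_sg() and rte_dmadev_perform() are
defined later in this patch, RTE_DIM is from rte_common.h):

	char src0[64], dst0[64], src1[128], dst1[128];
	struct dma_scatterlist sg[2] = {
		{ .src = src0, .dst = dst0, .length = sizeof(dst0) },
		{ .src = src1, .dst = dst1, .length = sizeof(dst1) },
	};
	dma_cookie_t cookie;

	cookie = rte_dmadev_copy_sg(dev_id, vq_id, sg, RTE_DIM(sg), 0);
	if (cookie >= 0)
		rte_dmadev_perform(dev_id, vq_id);	/* ring the doorbell */
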
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the total number of DMA devices that have been successfully
> + * initialised.
> + *
> + * @return
> + *   The total number of usable DMA devices.
> + */
> +__rte_experimental
> +uint16_t
> +rte_dmadev_count(void);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the device identifier for the named DMA device.
> + *
> + * @param name
> + *   DMA device name to select the DMA device identifier.
> + *
> + * @return
> + *   Returns DMA device identifier on success.
> + *   - <0: Failure to find named DMA device.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_get_dev_id(const char *name);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Return the NUMA socket to which a device is connected.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   The NUMA socket id to which the device is connected or
> + *   a default of zero if the socket could not be determined.
> + *   - -EINVAL: dev_id value is out of range.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_socket_id(uint16_t dev_id);
> +
> +/**
> + * The capabilities of a DMA device
> + */
> +#define RTE_DMA_DEV_CAPA_M2M   (1ull << 0) /**< Support mem-to-mem transfer */
> +#define RTE_DMA_DEV_CAPA_M2D   (1ull << 1) /**< Support mem-to-dev transfer */
> +#define RTE_DMA_DEV_CAPA_D2M   (1ull << 2) /**< Support dev-to-mem transfer */
> +#define RTE_DMA_DEV_CAPA_D2D   (1ull << 3) /**< Support dev-to-dev transfer */
> +#define RTE_DMA_DEV_CAPA_COPY  (1ull << 4) /**< Support copy ops */
> +#define RTE_DMA_DEV_CAPA_FILL  (1ull << 5) /**< Support fill ops */
> +#define RTE_DMA_DEV_CAPA_SG    (1ull << 6) /**< Support scatter-gather ops */
> +#define RTE_DMA_DEV_CAPA_FENCE (1ull << 7) /**< Support fence ops */
> +#define RTE_DMA_DEV_CAPA_IOVA  (1ull << 8) /**< Support IOVA as DMA address */
> +#define RTE_DMA_DEV_CAPA_VA    (1ull << 9) /**< Support VA as DMA address */
> +#define RTE_DMA_DEV_CAPA_MT_VQ (1ull << 10) /**< Support MT-safe of one virt queue */
> +#define RTE_DMA_DEV_CAPA_MT_MVQ        (1ull << 11) /**< Support MT-safe of multiple virt queues */
> +
> +/**
> + * A structure used to retrieve the contextual information of
> + * a DMA device
> + */
> +struct rte_dmadev_info {
> +       /**
> +        * Fields filled by framework
> +        */
> +       struct rte_device *device; /**< Generic Device information */
> +       const char *driver_name; /**< Device driver name */
> +       int socket_id; /**< Socket ID where memory is allocated */
> +
> +       /**
> +        * Specification fields filled by driver
> +        */
> +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> +       uint16_t max_hw_queues; /**< Maximum number of HW queues. */
> +       uint16_t max_vqs_per_hw_queue;
> +       /**< Maximum number of virt queues to allocate per HW queue */
> +       uint16_t max_desc;
> +       /**< Maximum allowed number of virt queue descriptors */
> +       uint16_t min_desc;
> +       /**< Minimum allowed number of virt queue descriptors */
> +
> +       /**
> +        * Status fields filled by driver
> +        */
> +       uint16_t nb_hw_queues; /**< Number of HW queues configured */
> +       uint16_t nb_vqs; /**< Number of virt queues configured */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve the contextual information of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @param[out] dev_info
> + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> + *   contextual information of the device.
> + * @return
> + *   - =0: Success, driver updates the contextual information of the DMA device
> + *   - <0: Error code returned by the driver info get function.
> + *
> + */
> +__rte_experimental
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> +
> +/**
> + * dma_address_type
> + */
> +enum dma_address_type {
> +       DMA_ADDRESS_TYPE_IOVA, /**< Use IOVA as dma address */
> +       DMA_ADDRESS_TYPE_VA, /**< Use VA as dma address */
> +};
> +
> +/**
> + * A structure used to configure a DMA device.
> + */
> +struct rte_dmadev_conf {
> +       enum dma_address_type addr_type; /**< Address type to be used */
> +       uint16_t nb_hw_queues; /**< Number of HW queues to enable */
> +       uint16_t max_vqs; /**< Maximum number of virt queues to use */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Configure a DMA device.
> + *
> + * This function must be invoked before any other function in the
> + * API. This function can also be re-invoked when a device is in the
> + * stopped state.
> + *
> + * The caller may use rte_dmadev_info_get() to get the capabilities of the
> + * resources available for this DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device to configure.
> + * @param dev_conf
> + *   The DMA device configuration structure encapsulated into a
> + *   rte_dmadev_conf object.
> + *
> + * @return
> + *   - =0: Success, device configured.
> + *   - <0: Error code returned by the driver configuration function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Start a DMA device.
> + *
> + * The device start step is the last one and consists of setting the DMA
> + * to start accepting jobs.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device started.
> + *   - <0: Error code returned by the driver start function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_start(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Stop a DMA device.
> + *
> + * The device can be restarted with a call to rte_dmadev_start()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device stopped.
> + *   - <0: Error code returned by the driver stop function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stop(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Close a DMA device.
> + *
> + * The device cannot be restarted after this call.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *  - =0: Successfully closed device
> + *  - <0: Failure to close device
> + */
> +__rte_experimental
> +int
> +rte_dmadev_close(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset a DMA device.
> + *
> + * This is different from the rte_dmadev_start->rte_dmadev_stop cycle, in a
> + * sense similar to a hard or soft reset.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Successfully reset device.
> + *   - <0: Failure to reset device.
> + *   - (-ENOTSUP): If the device doesn't support this function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_reset(uint16_t dev_id);
> +
> +/**
> + * dma_transfer_direction
> + */
> +enum dma_transfer_direction {
> +       DMA_MEM_TO_MEM,
> +       DMA_MEM_TO_DEV,
> +       DMA_DEV_TO_MEM,
> +       DMA_DEV_TO_DEV,
> +};
> +
> +/**
> + * A structure used to configure a DMA virt queue.
> + */
> +struct rte_dmadev_queue_conf {
> +       enum dma_transfer_direction direction;
> +       /**< Associated transfer direction */
> +       uint16_t hw_queue_id; /**< The HW queue on which to create the virt queue */
> +       uint16_t nb_desc; /**< Number of descriptors for this virt queue */
> +       uint64_t dev_flags; /**< Device specific flags */
> +       void *dev_ctx; /**< Device specific context */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate and set up a virt queue.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param conf
> + *   The queue configuration structure encapsulated into a
> + *   rte_dmadev_queue_conf object.
> + *
> + * @return
> + *   - >=0: Success, the return value is the virt queue id.
> + *   - <0: Error code returned by the driver queue setup function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_queue_setup(uint16_t dev_id,
> +                      const struct rte_dmadev_queue_conf *conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Release a virt queue.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of the virt queue returned by queue setup.
> + *
> + * @return
> + *   - =0: Successfully released the virt queue.
> + *   - <0: Error code returned by the driver queue release function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_queue_release(uint16_t dev_id, uint16_t vq_id);
> +
> +/**
> + * A structure used to retrieve information of a DMA virt queue.
> + */
> +struct rte_dmadev_queue_info {
> +       enum dma_transfer_direction direction;
> +       /**< Associated transfer direction */
> +       uint16_t hw_queue_id; /**< The HW queue on which to create the virt queue */
> +       uint16_t nb_desc; /**< Number of descriptors for this virt queue */
> +       uint64_t dev_flags; /**< Device specific flags */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve information of a DMA virt queue.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of the virt queue returned by queue setup.
> + * @param[out] info
> + *   The queue info structure encapsulated into a rte_dmadev_queue_info
> + *   object.
> + *
> + * @return
> + *   - =0: Successfully retrieved information.
> + *   - <0: Error code returned by the driver queue info function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
> +                         struct rte_dmadev_queue_info *info);
> +
> +#include "rte_dmadev_core.h"
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the DMA virt queue.
> + *
> + * This queues up a copy operation to be performed by hardware, but does
> + * not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   Opaque flags for this operation.
> + *
> + * @return
> + *   dma_cookie_t: please refer to the corresponding definition.
> + *
> + * NOTE: The caller must ensure that the input parameters are valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, void *src, void *dst,

Did you consider also mbuf API usage for memory descriptor?

> +               uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->copy)(dev, vq_id, src, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the DMA virt queue.
> + *
> + * This queues up a scatter list copy operation to be performed by
> + * hardware, but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param sg_len
> + *   The number of scatterlist elements.
> + * @param flags
> + *   Opaque flags for this operation.
> + *
> + * @return
> + *   dma_cookie_t: please refer to the corresponding definition.
> + *
> + * NOTE: The caller must ensure that the input parameters are valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
> +                  const struct dma_scatterlist *sg,
> +                  uint32_t sg_len, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->copy_sg)(dev, vq_id, sg, sg_len, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the DMA virt queue
> + *
> + * This queues up a fill operation to be performed by hardware, but does
> + * not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   Opaque flags for this operation.
> + *
> + * @return
> + *   dma_cookie_t: please refer to the corresponding definition.
> + *
> + * NOTE: The caller must ensure that the input parameters are valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
> +               void *dst, uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->fill)(dev, vq_id, pattern, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list fill operation onto the DMA virt queue
> + *
> + * This queues up a scatter list fill operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param sg_len
> + *   The number of scatterlist elements.
> + * @param flags
> + *   Opaque flags for this operation.
> + *
> + * @return
> + *   dma_cookie_t: please refer to the corresponding definition.
> + *
> + * NOTE: The caller must ensure that the input parameters are valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline dma_cookie_t
> +rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
> +                  const struct dma_scatterlist *sg, uint32_t sg_len,
> +                  uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->fill_sg)(dev, vq_id, pattern, sg, sg_len, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Add a fence to force ordering between operations
> + *
> + * This adds a fence to a sequence of operations to enforce ordering, such
> that
> + * all operations enqueued before the fence must be completed before
> operations
> + * after the fence.
> + * NOTE: Since this fence may be added as a flag to the last operation
> enqueued,
> + * this API may not function correctly when called immediately after an
> + * "rte_dmadev_perform" call i.e. before any new operations are
> enqueued.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + *
> + * @return
> + *   - =0: Successfully added fence.
> + *   - <0: Failure to add fence.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->fence)(dev, vq_id);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + *
> + * @return
> + *   - =0: Successfully triggered hardware.
> + *   - <0: Failure to trigger hardware.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->perform)(dev, vq_id);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] cookie
> + *   The last completed operation's cookie.
> + * @param[out] has_error
> + *   Indicates if there are transfer errors.
> + *
> + * @return
> + *   The number of operations that successfully completed.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t
> nb_cpls,
> +                    dma_cookie_t *cookie, bool *has_error)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       *has_error = false;
> +       return (*dev->completed)(dev, vq_id, nb_cpls, cookie, has_error);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that failed to complete.
> + * NOTE: This API is used when rte_dmadev_completed() reports has_error as set.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue.
> + * @param nb_status
> + *   Indicates the size of status array.
> + * @param[out] status
> + *   The error code of operations that failed to complete.
> + * @param[out] cookie
> + *   The cookie of the last operation that failed to complete.
> + *
> + * @return
> + *   The number of operations that failed to complete.
> + *
> + * NOTE: The caller must ensure that the input parameter is valid and the
> + *       corresponding device supports the operation.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> +                          const uint16_t nb_status, uint32_t *status,
> +                          dma_cookie_t *cookie)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
> +}
> +
> +struct rte_dmadev_stats {
> +       uint64_t enqueue_fail_count;
> +       /**< Count of all operations which failed to enqueue */
> +       uint64_t enqueued_count;
> +       /**< Count of all operations which were successfully enqueued */
> +       uint64_t completed_fail_count;
> +       /**< Count of all operations which failed to complete */
> +       uint64_t completed_count;
> +       /**< Count of all operations which completed successfully */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve basic statistics of one or all DMA virt queue(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue, -1 means all virt queues.
> + * @param[out] stats
> + *   The basic statistics structure encapsulated into rte_dmadev_stats
> + *   object.
> + *
> + * @return
> + *   - =0: Successfully retrieved stats.
> + *   - <0: Failure to retrieve stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, int vq_id,
> +                    struct rte_dmadev_stats *stats);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset basic statistics of one or all DMA virt queue(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vq_id
> + *   The identifier of virt queue, -1 means all virt queues.
> + *
> + * @return
> + *   - =0: Successfully reset stats.
> + *   - <0: Failure to reset stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, int vq_id);
> +
> +/** Maximum name length for extended statistics counters */
> +#define RTE_DMA_DEV_XSTATS_NAME_SIZE 64
> +
> +/**
> + * A name-key lookup element for extended statistics.
> + *
> + * This structure is used to map between names and ID numbers
> + * for extended dmadev statistics.
> + */
> +struct rte_dmadev_xstats_name {
> +       char name[RTE_DMA_DEV_XSTATS_NAME_SIZE];
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve names of extended statistics of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param[out] xstats_names
> + *   Block of memory to insert names into. Must be at least size in capacity.
> + *   If set to NULL, function returns required capacity.
> + * @param size
> + *   Capacity of xstats_names (number of names).
> + * @return
> + *   - positive value lower or equal to size: success. The return value
> + *     is the number of entries filled in the stats table.
> + *   - positive value higher than size: error, the given statistics table
> + *     is too small. The return value corresponds to the size that should
> + *     be given to succeed. The entries in the table are not valid and
> + *     shall not be used by the caller.
> + *   - negative value on error.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_xstats_names_get(uint16_t dev_id,
> +                           struct rte_dmadev_xstats_name *xstats_names,
> +                           uint32_t size);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve extended statistics of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param ids
> + *   The id numbers of the stats to get. The ids can be obtained from the stat
> + *   position in the stat list from rte_dmadev_xstats_names_get().
> + * @param[out] values
> + *   The values for each stat requested by ID.
> + * @param n
> + *   The number of stats requested.
> + *
> + * @return
> + *   - positive value: number of stat entries filled into the values array.
> + *   - negative value on error.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_xstats_get(uint16_t dev_id, const uint32_t ids[],
> +                     uint64_t values[], uint32_t n);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset the values of the xstats of the selected component in the device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param ids
> + *   Selects specific statistics to be reset. When NULL, all statistics
> + *   will be reset. If non-NULL, must point to array of at least
> + *   *nb_ids* size.
> + * @param nb_ids
> + *   The number of ids available from the *ids* array. Ignored when ids is
> NULL.
> + *
> + * @return
> + *   - zero: successfully reset the statistics to zero.
> + *   - negative value on error.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t
> nb_ids);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger the dmadev self test.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - 0: Selftest successful.
> + *   - -ENOTSUP if the device doesn't support selftest
> + *   - other values < 0 on failure.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_selftest(uint16_t dev_id);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_H_ */
> diff --git a/lib/dmadev/rte_dmadev_core.h
> b/lib/dmadev/rte_dmadev_core.h
> new file mode 100644
> index 0000000..a3afea2
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -0,0 +1,98 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_CORE_H_
> +#define _RTE_DMADEV_CORE_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device internal header.
> + *
> + * This header contains internal data types. But they are still part of the
> + * public API because they are used by inline public functions.
> + */
> +
> +struct rte_dmadev;
> +
> +typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev,
> uint16_t vq_id,
> +                                     void *src, void *dst,
> +                                     uint32_t length, uint64_t flags);
> +/**< @internal Function used to enqueue a copy operation. */
> +
> +typedef dma_cookie_t (*dmadev_copy_sg_t)(struct rte_dmadev *dev,
> uint16_t vq_id,
> +                                        const struct dma_scatterlist *sg,
> +                                        uint32_t sg_len, uint64_t flags);
> +/**< @internal Function used to enqueue a scatter list copy operation. */
> +
> +typedef dma_cookie_t (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t
> vq_id,
> +                                     uint64_t pattern, void *dst,
> +                                     uint32_t length, uint64_t flags);
> +/**< @internal Function used to enqueue a fill operation. */
> +
> +typedef dma_cookie_t (*dmadev_fill_sg_t)(struct rte_dmadev *dev,
> uint16_t vq_id,
> +                       uint64_t pattern, const struct dma_scatterlist *sg,
> +                       uint32_t sg_len, uint64_t flags);
> +/**< @internal Function used to enqueue a scatter list fill operation. */
> +
> +typedef int (*dmadev_fence_t)(struct rte_dmadev *dev, uint16_t vq_id);
> +/**< @internal Function used to add a fence ordering between operations.
> */
> +
> +typedef int (*dmadev_perform_t)(struct rte_dmadev *dev, uint16_t
> vq_id);
> +/**< @internal Function used to trigger hardware to begin performing. */
> +
> +typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev,
> uint16_t vq_id,
> +                                      const uint16_t nb_cpls,
> +                                      dma_cookie_t *cookie, bool *has_error);
> +/**< @internal Function used to return number of successful completed
> operations */
> +
> +typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
> +                       uint16_t vq_id, const uint16_t nb_status,
> +                       uint32_t *status, dma_cookie_t *cookie);
> +/**< @internal Function used to return number of failed completed
> operations */
> +
> +#define RTE_DMADEV_NAME_MAX_LEN        64 /**< Max length of name
> of DMA PMD */
> +
> +struct rte_dmadev_ops;
> +
> +/**
> + * The data structure associated with each DMA device.
> + */
> +struct rte_dmadev {
> +       /**< Enqueue a copy operation onto the DMA device. */
> +       dmadev_copy_t copy;
> +       /**< Enqueue a scatter list copy operation onto the DMA device. */
> +       dmadev_copy_sg_t copy_sg;
> +       /**< Enqueue a fill operation onto the DMA device. */
> +       dmadev_fill_t fill;
> +       /**< Enqueue a scatter list fill operation onto the DMA device. */
> +       dmadev_fill_sg_t fill_sg;
> +       /**< Add a fence to force ordering between operations. */
> +       dmadev_fence_t fence;
> +       /**< Trigger hardware to begin performing enqueued operations. */
> +       dmadev_perform_t perform;
> +       /**< Returns the number of operations that successfully completed. */
> +       dmadev_completed_t completed;
> +       /**< Returns the number of operations that failed to complete. */
> +       dmadev_completed_fails_t completed_fails;
> +
> +       void *dev_private; /**< PMD-specific private data */
> +       const struct rte_dmadev_ops *dev_ops; /**< Functions exported by
> PMD */
> +
> +       uint16_t dev_id; /**< Device ID for this instance */
> +       int socket_id; /**< Socket ID where memory is allocated */
> +       struct rte_device *device;
> +       /**< Device info. supplied during device initialization */
> +       const char *driver_name; /**< Driver info. supplied by probing */
> +       char name[RTE_DMADEV_NAME_MAX_LEN]; /**< Device name */
> +
> +       RTE_STD_C11
> +       uint8_t attached : 1; /**< Flag indicating the device is attached */
> +       uint8_t started : 1; /**< Device state: STARTED(1)/STOPPED(0) */
> +
> +} __rte_cache_aligned;
> +
> +extern struct rte_dmadev rte_dmadevices[];
> +
> +#endif /* _RTE_DMADEV_CORE_H_ */
> diff --git a/lib/dmadev/rte_dmadev_pmd.h
> b/lib/dmadev/rte_dmadev_pmd.h
> new file mode 100644
> index 0000000..ef03cf7
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_pmd.h
> @@ -0,0 +1,210 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_PMD_H_
> +#define _RTE_DMADEV_PMD_H_
> +
> +/** @file
> + * RTE DMA PMD APIs
> + *
> + * @note
> + * Driver facing APIs for a DMA device. These are not to be called directly by
> + * any application.
> + */
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#include <string.h>
> +
> +#include <rte_dev.h>
> +#include <rte_log.h>
> +#include <rte_common.h>
> +
> +#include "rte_dmadev.h"
> +
> +extern int libdmadev_logtype;
> +
> +#define RTE_DMADEV_LOG(level, fmt, args...) \
> +       rte_log(RTE_LOG_ ## level, libdmadev_logtype, "%s(): " fmt "\n", \
> +               __func__, ##args)
> +
> +/* Macros to check for valid device */
> +#define RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
> +       if (!rte_dmadev_pmd_is_valid_dev((dev_id))) { \
> +               RTE_DMADEV_LOG(ERR, "Invalid dev_id=%d", dev_id); \
> +               return retval; \
> +       } \
> +} while (0)
> +
> +#define RTE_DMADEV_VALID_DEVID_OR_RET(dev_id) do { \
> +       if (!rte_dmadev_pmd_is_valid_dev((dev_id))) { \
> +               RTE_DMADEV_LOG(ERR, "Invalid dev_id=%d", dev_id); \
> +               return; \
> +       } \
> +} while (0)
> +
> +#define RTE_DMADEV_DETACHED  0
> +#define RTE_DMADEV_ATTACHED  1
> +
> +/**
> + * Validate if the DMA device index is a valid attached DMA device.
> + *
> + * @param dev_id
> + *   DMA device index.
> + *
> + * @return
> + *   - 1 if the device index is valid, 0 otherwise.
> + */
> +static inline unsigned
> +rte_dmadev_pmd_is_valid_dev(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev;
> +
> +       if (dev_id >= RTE_DMADEV_MAX_DEVS)
> +               return 0;
> +
> +       dev = &rte_dmadevices[dev_id];
> +       if (dev->attached != RTE_DMADEV_ATTACHED)
> +               return 0;
> +       else
> +               return 1;
> +}
> +
> +/**
> + * Definitions of control-plane functions exported by a driver through the
> + * generic structure of type *rte_dmadev_ops* supplied in the
> *rte_dmadev*
> + * structure associated with a device.
> + */
> +
> +typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
> +                                struct rte_dmadev_info *dev_info);
> +/**< @internal Function used to get device information of a device. */
> +
> +typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
> +                                 const struct rte_dmadev_conf *dev_conf);
> +/**< @internal Function used to configure a device. */
> +
> +typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
> +/**< @internal Function used to start a configured device. */
> +
> +typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
> +/**< @internal Function used to stop a configured device. */
> +
> +typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
> +/**< @internal Function used to close a configured device. */
> +
> +typedef int (*dmadev_reset_t)(struct rte_dmadev *dev);
> +/**< @internal Function used to reset a configured device. */
> +
> +typedef int (*dmadev_queue_setup_t)(struct rte_dmadev *dev,
> +                                   const struct rte_dmadev_queue_conf *conf);
> +/**< @internal Function used to allocate and set up a virt queue. */
> +
> +typedef int (*dmadev_queue_release_t)(struct rte_dmadev *dev,
> uint16_t vq_id);
> +/**< @internal Function used to release a virt queue. */
> +
> +typedef int (*dmadev_queue_info_t)(struct rte_dmadev *dev, uint16_t
> vq_id,
> +                                  struct rte_dmadev_queue_info *info);
> +/**< @internal Function used to retrieve information of a virt queue. */
> +
> +typedef int (*dmadev_stats_get_t)(struct rte_dmadev *dev, int vq_id,
> +                                 struct rte_dmadev_stats *stats);
> +/**< @internal Function used to retrieve basic statistics. */
> +
> +typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, int vq_id);
> +/**< @internal Function used to reset basic statistics. */
> +
> +typedef int (*dmadev_xstats_get_names_t)(const struct rte_dmadev
> *dev,
> +               struct rte_dmadev_xstats_name *xstats_names,
> +               uint32_t size);
> +/**< @internal Function used to get names of extended stats. */
> +
> +typedef int (*dmadev_xstats_get_t)(const struct rte_dmadev *dev,
> +               const uint32_t ids[], uint64_t values[], uint32_t n);
> +/**< @internal Function used to retrieve extended stats. */
> +
> +typedef int (*dmadev_xstats_reset_t)(struct rte_dmadev *dev,
> +                                    const uint32_t ids[], uint32_t nb_ids);
> +/**< @internal Function used to reset extended stats. */
> +
> +typedef int (*dmadev_selftest_t)(uint16_t dev_id);
> +/**< @internal Function used to start dmadev selftest. */
> +
> +/** DMA device operations function pointer table */
> +struct rte_dmadev_ops {
> +       /**< Get device info. */
> +       dmadev_info_get_t dev_info_get;
> +       /**< Configure device. */
> +       dmadev_configure_t dev_configure;
> +       /**< Start device. */
> +       dmadev_start_t dev_start;
> +       /**< Stop device. */
> +       dmadev_stop_t dev_stop;
> +       /**< Close device. */
> +       dmadev_close_t dev_close;
> +       /**< Reset device. */
> +       dmadev_reset_t dev_reset;
> +
> +       /**< Allocate and set up a virt queue. */
> +       dmadev_queue_setup_t queue_setup;
> +       /**< Release a virt queue. */
> +       dmadev_queue_release_t queue_release;
> +       /**< Retrieve information of a virt queue */
> +       dmadev_queue_info_t queue_info_get;
> +
> +       /**< Get basic statistics. */
> +       dmadev_stats_get_t stats_get;
> +       /**< Reset basic statistics. */
> +       dmadev_stats_reset_t stats_reset;
> +       /**< Get names of extended stats. */
> +       dmadev_xstats_get_names_t xstats_get_names;
> +       /**< Get extended statistics. */
> +       dmadev_xstats_get_t xstats_get;
> +       /**< Reset extended statistics values. */
> +       dmadev_xstats_reset_t xstats_reset;
> +
> +       /**< Device selftest function */
> +       dmadev_selftest_t dev_selftest;
> +};
> +
> +/**
> + * Allocates a new dmadev slot for a DMA device and returns the pointer
> + * to that slot for the driver to use.
> + *
> + * @param name
> + *   Unique identifier name for each device
> + * @param dev_private_size
> + *   Size of private data memory allocated within rte_dmadev object.
> + *   Set to 0 to disable internal memory allocation and allow for
> + *   self-allocation.
> + * @param socket_id
> + *   Socket to allocate resources on.
> + *
> + * @return
> + *   - NULL: Failure to allocate
> + *   - Other: The rte_dmadev structure pointer for the new device
> + */
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name, size_t dev_private_size,
> +                       int socket_id);
> +
> +/**
> + * Release the specified dmadev device.
> + *
> + * @param dev
> + *   The *dmadev* pointer is the address of the *rte_dmadev* structure.
> + *
> + * @return
> + *   - 0 on success, negative on error
> + */
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_PMD_H_ */
> diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
> new file mode 100644
> index 0000000..383b3ca
> --- /dev/null
> +++ b/lib/dmadev/version.map
> @@ -0,0 +1,32 @@
> +EXPERIMENTAL {
> +       global:
> +
> +       rte_dmadev_count;
> +       rte_dmadev_get_dev_id;
> +       rte_dmadev_socket_id;
> +       rte_dmadev_info_get;
> +       rte_dmadev_configure;
> +       rte_dmadev_start;
> +       rte_dmadev_stop;
> +       rte_dmadev_close;
> +       rte_dmadev_reset;
> +       rte_dmadev_queue_setup;
> +       rte_dmadev_queue_release;
> +       rte_dmadev_queue_info_get;
> +       rte_dmadev_copy;
> +       rte_dmadev_copy_sg;
> +       rte_dmadev_fill;
> +       rte_dmadev_fill_sg;
> +       rte_dmadev_fence;
> +       rte_dmadev_perform;
> +       rte_dmadev_completed;
> +       rte_dmadev_completed_fails;
> +       rte_dmadev_stats_get;
> +       rte_dmadev_stats_reset;
> +       rte_dmadev_xstats_names_get;
> +       rte_dmadev_xstats_get;
> +       rte_dmadev_xstats_reset;
> +       rte_dmadev_selftest;
> +
> +       local: *;
> +};
> diff --git a/lib/meson.build b/lib/meson.build
> index 1673ca4..68d239f 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -60,6 +60,7 @@ libraries = [
>          'bpf',
>          'graph',
>          'node',
> +        'dmadev',
>  ]
> 
>  if is_windows
> --
> 2.8.1
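For reference, a minimal sketch of the intended call flow described in the
header comments above, using only the prototypes from this patch. Error
handling is omitted; the configuration values and the 32-op burst size are
illustrative, and the sketch assumes rte_dmadev_queue_setup() returns the
new virt queue id:

    #include <stdbool.h>
    #include <rte_dmadev.h>

    static void
    dma_copy_burst(uint16_t dev_id, void **src, void **dst, uint32_t len)
    {
            struct rte_dmadev_conf conf = { .nb_hw_queues = 1, .max_vqs = 1 };
            struct rte_dmadev_queue_conf qconf = {
                    .direction = DMA_MEM_TO_MEM,
                    .hw_queue_id = 0,
                    .nb_desc = 128,
            };
            dma_cookie_t cookie;
            bool has_error;
            uint16_t done = 0;
            int i, vq_id;

            rte_dmadev_configure(dev_id, &conf);
            vq_id = rte_dmadev_queue_setup(dev_id, &qconf); /* assumed to return vq id */
            rte_dmadev_start(dev_id);

            /* enqueue a burst of copies, then ring the doorbell once */
            for (i = 0; i < 32; i++)
                    rte_dmadev_copy(dev_id, vq_id, src[i], dst[i], len, 0);
            rte_dmadev_perform(dev_id, vq_id);

            /* poll until the whole burst has completed; cookie reports the
             * last completed operation */
            while (done < 32)
                    done += rte_dmadev_completed(dev_id, vq_id, 32 - done,
                                                 &cookie, &has_error);
    }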


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-04  9:30 ` Jerin Jacob
@ 2021-07-05 10:52   ` Bruce Richardson
  2021-07-05 11:12     ` Morten Brørup
                       ` (2 more replies)
  2021-07-06  3:01   ` fengchengwen
  1 sibling, 3 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-05 10:52 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> >
> > This patch introduces 'dmadevice' which is a generic type of DMA
> > device.
> >
> > The APIs of dmadev library exposes some generic operations which can
> > enable configuration and I/O with the DMA devices.
> >
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> 
> Thanks for v1.
> 
> I would suggest finalizing lib/dmadev/rte_dmadev.h before doing the
> implementation so that you don't need
> to waste time on reworking the implementation.
> 

I actually like having the .c file available too. Before we lock down the
.h file and the API, I want to verify the performance of our drivers with
the implementation, and having a working .c file is obviously necessary for
that. So I appreciate having it as part of the RFC.

> Comments inline.
> 
> > ---
<snip>
> > + *
> > + * The DMA framework is built on the following abstraction model:
> > + *
> > + *     ------------    ------------
> > + *     |virt-queue|    |virt-queue|
> > + *     ------------    ------------
> > + *            \           /
> > + *             \         /
> > + *              \       /
> > + *            ------------     ------------
> > + *            | HW-queue |     | HW-queue |
> > + *            ------------     ------------
> > + *                   \            /
> > + *                    \          /
> > + *                     \        /
> > + *                     ----------
> > + *                     | dmadev |
> > + *                     ----------
> 
> Continuing the discussion with @Morten Brørup , I think, we need to
> finalize the model.
> 

+1 and the terminology with regards to queues and channels. With our ioat
hardware, each HW queue was called a channel for instance.

> > + *   a) The DMA operation request must be submitted to the virt queue, virt
> > + *      queues must be created based on HW queues, the DMA device could have
> > + *      multiple HW queues.
> > + *   b) The virt queues on the same HW-queue could represent different contexts,
> > + *      e.g. user could create virt-queue-0 on HW-queue-0 for mem-to-mem
> > + *      transfer scenario, and create virt-queue-1 on the same HW-queue for
> > + *      mem-to-dev transfer scenario.
> > + *   NOTE: user could also create multiple virt queues for mem-to-mem transfer
> > + *         scenario as long as the corresponding driver supports.
> > + *
> > + * The control plane APIs include configure/queue_setup/queue_release/start/
> > + * stop/reset/close, in order to start device work, the call sequence must be
> > + * as follows:
> > + *     - rte_dmadev_configure()
> > + *     - rte_dmadev_queue_setup()
> > + *     - rte_dmadev_start()
> 
> Please add reconfigure behaviour etc, Please check the
> lib/regexdev/rte_regexdev.h
> introduction. I have added similar ones so you could reuse as much as possible.
> 
> 
> > + * The dataplane APIs include two parts:
> > + *   a) The first part is the submission of operation requests:
> > + *        - rte_dmadev_copy()
> > + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> > + *        - rte_dmadev_fill()
> > + *        - rte_dmadev_fill_sg() - scatter-gather form of fill
> > + *        - rte_dmadev_fence()   - add a fence force ordering between operations
> > + *        - rte_dmadev_perform() - issue doorbell to hardware
> > + *      These APIs could work with different virt queues which have different
> > + *      contexts.
> > + *      The first four APIs are used to submit the operation request to the virt
> > + *      queue, if the submission is successful, a cookie (as type
> > + *      'dma_cookie_t') is returned, otherwise a negative number is returned.
> > + *   b) The second part is to obtain the result of requests:
> > + *        - rte_dmadev_completed()
> > + *            - return the number of operation requests completed successfully.
> > + *        - rte_dmadev_completed_fails()
> > + *            - return the number of operation requests failed to complete.
> > + *
> > + * The misc APIs include info_get/queue_info_get/stats/xstats/selftest, provide
> > + * information query and self-test capabilities.
> > + *
> > + * About the dataplane APIs MT-safe, there are two dimensions:
> > + *   a) For one virt queue, the submit/completion API could be MT-safe,
> > + *      e.g. one thread do submit operation, another thread do completion
> > + *      operation.
> > + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_VQ.
> > + *      If driver don't support it, it's up to the application to guarantee
> > + *      MT-safe.
> > + *   b) For multiple virt queues on the same HW queue, e.g. one thread do
> > + *      operation on virt-queue-0, another thread do operation on virt-queue-1.
> > + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_MVQ.
> > + *      If driver don't support it, it's up to the application to guarantee
> > + *      MT-safe.
> 
> From an application PoV it may not be good to write portable
> applications. Please check
> latest thread with @Morten Brørup
> 
> > + */
> > +
> > +#ifdef __cplusplus
> > +extern "C" {
> > +#endif
> > +
> > +#include <rte_common.h>
> > +#include <rte_memory.h>
> > +#include <rte_errno.h>
> > +#include <rte_compat.h>
> 
> Sort in alphabetical order.
> 
> > +
> > +/**
> > + * dma_cookie_t - an opaque DMA cookie
> 
> Since we are defining the behaviour is not opaque any more.
> I think, it is better to call ring_idx or so.
> 

+1 for ring index. We don't need a separate type for it though, just
document the index as an unsigned return value.

> 
> > +#define RTE_DMA_DEV_CAPA_MT_MVQ (1ull << 11) /**< Support MT-safe of multiple virt queues */
> 
> Please add lots of @see for all symbols where they are being used, so that
> one can understand the full scope of
> the symbols. See the example below.
> 
> #define RTE_REGEXDEV_CAPA_RUNTIME_COMPILATION_F (1ULL << 0)
> /**< RegEx device does support compiling the rules at runtime unlike
>  * loading only the pre-built rule database using
>  * struct rte_regexdev_config::rule_db in rte_regexdev_configure()
>  *
>  * @see struct rte_regexdev_config::rule_db, rte_regexdev_configure()
>  * @see struct rte_regexdev_info::regexdev_capa
>  */
> 
> > + *
> > + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's a error
> > + * code.
> > + * When using cookies, comply with the following rules:
> > + * a) Cookies for each virtual queue are independent.
> > + * b) For a virt queue, the cookie are monotonically incremented, when it reach
> > + *    the INT_MAX, it wraps back to zero.

I disagree with the INT_MAX (or INT32_MAX) value here. If we use that
value, it means that we cannot use implicit wrap-around inside the CPU and
have to check for the INT_MAX value. Better to:
1. Specify that it wraps at UINT16_MAX which allows us to just use a
uint16_t internally and wrap-around automatically, or:
2. Specify that it wraps at a power-of-2 value >= UINT16_MAX, giving
drivers the flexibility at what value to wrap around.
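A sketch of why a power-of-2 wrap value allows the implicit wrap-around
(hypothetical driver-internal helper, not part of the patch):

    /* With a uint16_t index that wraps at UINT16_MAX + 1 (or any power of 2
     * applied as a mask), the distance between two indexes stays correct
     * across wrap-around, with no explicit INT_MAX check needed. */
    static inline uint16_t
    ring_pending(uint16_t head, uint16_t tail)
    {
            return (uint16_t)(head - tail); /* modulo-65536 arithmetic */
    }

    /* e.g. head == 5 after wrapping, tail == 65530: (5 - 65530) & 0xFFFF == 11 */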

> > + * c) The initial cookie of a virt queue is zero, after the device is stopped or
> > + *    reset, the virt queue's cookie needs to be reset to zero.
> > + * Example:
> > + *    step-1: start one dmadev
> > + *    step-2: enqueue a copy operation, the cookie return is 0
> > + *    step-3: enqueue a copy operation again, the cookie return is 1
> > + *    ...
> > + *    step-101: stop the dmadev
> > + *    step-102: start the dmadev
> > + *    step-103: enqueue a copy operation, the cookie return is 0
> > + *    ...
> > + */
> 
> Good explanation.
> 
> > +typedef int32_t dma_cookie_t;
> 

As I mentioned before, I'd just remove this, and use regular int types,
with "ring_idx" as the name.

> 
> > +
> > +/**
> > + * dma_scatterlist - can hold scatter DMA operation request
> > + */
> > +struct dma_scatterlist {
> 
> I prefer to change scatterlist -> sg
> i.e rte_dma_sg
> 
> > +       void *src;
> > +       void *dst;
> > +       uint32_t length;
> > +};
> > +
> 
> > +
> > +/**
> > + * A structure used to retrieve the contextual information of
> > + * an DMA device
> > + */
> > +struct rte_dmadev_info {
> > +       /**
> > +        * Fields filled by framewok
> 
> typo.
> 
> > +        */
> > +       struct rte_device *device; /**< Generic Device information */
> > +       const char *driver_name; /**< Device driver name */
> > +       int socket_id; /**< Socket ID where memory is allocated */
> > +
> > +       /**
> > +        * Specification fields filled by driver
> > +        */
> > +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> > +       uint16_t max_hw_queues; /**< Maximum number of HW queues. */
> > +       uint16_t max_vqs_per_hw_queue;
> > +       /**< Maximum number of virt queues to allocate per HW queue */
> > +       uint16_t max_desc;
> > +       /**< Maximum allowed number of virt queue descriptors */
> > +       uint16_t min_desc;
> > +       /**< Minimum allowed number of virt queue descriptors */
> 
> Please add max_nb_segs. i.e maximum number of segments supported.
> 
> > +
> > +       /**
> > +        * Status fields filled by driver
> > +        */
> > +       uint16_t nb_hw_queues; /**< Number of HW queues configured */
> > +       uint16_t nb_vqs; /**< Number of virt queues configured */
> > +};
> > +
> > +/**
> > + * dma_address_type
> > + */
> > +enum dma_address_type {
> > +       DMA_ADDRESS_TYPE_IOVA, /**< Use IOVA as dma address */
> > +       DMA_ADDRESS_TYPE_VA, /**< Use VA as dma address */
> > +};
> > +
> > +/**
> > + * A structure used to configure a DMA device.
> > + */
> > +struct rte_dmadev_conf {
> > +       enum dma_address_type addr_type; /**< Address type to used */
> 
> I think, there are 3 kinds of limitations/capabilities.
> 
> When the system is configured as IOVA as VA
> 1) Device supports any VA address like memory from rte_malloc(),
> rte_memzone(), malloc, stack memory
> 2) Device support only VA address from rte_malloc(), rte_memzone() i.e
> memory backed by hugepage and added to DMA map.
> 
> When the system is configured as IOVA as PA
> 1) Devices support only PA addresses .
> 
> IMO, Above needs to be  advertised as capability and application needs
> to align with that
> and I dont think application requests the driver to work in any of the modes.
> 
> 

I don't think we need this level of detail for addressing capabilities.
Unless I'm missing something, the hardware should behave exactly as other
hardware does, taking in IOVAs. If the user wants to check whether virtual
addresses to pinned memory can be used directly, the user can call
"rte_eal_iova_mode". We can't have a situation where some hardware uses one
type of address and another hardware the other.

Therefore, the only additional addressing capability we should need to
report is that the hardware can use SVM/SVA and use virtual addresses not
in hugepage memory.
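e.g. an application can already make that determination today;
rte_eal_iova_mode() is an existing EAL call, while RTE_DMA_DEV_CAPA_SVA is a
hypothetical name for the suggested SVM/SVA capability flag (sketch):

    #include <stdbool.h>
    #include <rte_eal.h>
    #include <rte_dmadev.h>

    static bool
    can_use_plain_va(const struct rte_dmadev_info *info)
    {
            /* plain virtual addresses work either when EAL itself runs in
             * VA mode, or when the device advertises SVM/SVA support */
            return rte_eal_iova_mode() == RTE_IOVA_VA ||
                   (info->dev_capa & RTE_DMA_DEV_CAPA_SVA) != 0;
    }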

> 
> > +       uint16_t nb_hw_queues; /**< Number of HW-queues enable to use */
> > +       uint16_t max_vqs; /**< Maximum number of virt queues to use */
> 
> You need to document what the max value allowed is, i.e. it is based on
> info_get(), and mention the field
> in the info structure
> 
> 
> > +
> > +/**
> > + * dma_transfer_direction
> > + */
> > +enum dma_transfer_direction {
> 
> rte_dma_transfer_direction
> 
> > +       DMA_MEM_TO_MEM,
> > +       DMA_MEM_TO_DEV,
> > +       DMA_DEV_TO_MEM,
> > +       DMA_DEV_TO_DEV,
> > +};
> > +
> > +/**
> > + * A structure used to configure a DMA virt queue.
> > + */
> > +struct rte_dmadev_queue_conf {
> > +       enum dma_transfer_direction direction;
> 
> 
> > +       /**< Associated transfer direction */
> > +       uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> > +       uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> > +       uint64_t dev_flags; /**< Device specific flags */
> 
> Use of this? Need more comments on this.
> Since it is in slowpath, we can have non-opaque names here based on
> each driver capability.
> 
> 
> > +       void *dev_ctx; /**< Device specific context */
> 
> Use of this? Need more comments on this.
> 

I think this should be dropped. We should not have any opaque
device-specific info in these structs, rather if a particular device needs
parameters we should call them out. Drivers for which it's not relevant can
ignore them (and report same in capability if necessary). Since this is not
a dataplane API, we aren't concerned too much about perf and can size the
struct appropriately.
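e.g. replacing the opaque members with explicit, documented ones that
non-supporting drivers can ignore; the priority field below is purely
illustrative, the rest is from this patch:

    struct rte_dmadev_queue_conf {
            enum dma_transfer_direction direction;
            uint16_t hw_queue_id;
            uint16_t nb_desc;
            /* explicit knob instead of dev_flags/dev_ctx; illustrative only */
            uint16_t priority; /* 0 = default; drivers without QoS ignore it */
    };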

> 
> Please add a good amount of reserved bits and have an API to init this
> structure for future ABI stability, say rte_dmadev_queue_config_init()
> or so.
> 

I don't think that is necessary. Since the config struct is used only as
parameter to the config function, any changes to it can be managed by
versioning that single function. Padding would only be necessary if we had
an array of these config structs somewhere.

> 
> > +
> > +/**
> > + * A structure used to retrieve information of a DMA virt queue.
> > + */
> > +struct rte_dmadev_queue_info {
> > +       enum dma_transfer_direction direction;
> 
> A queue may support all directions so I think it should be a bitfield.
> 
> > +       /**< Associated transfer direction */
> > +       uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> > +       uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> > +       uint64_t dev_flags; /**< Device specific flags */
> > +};
> > +
> 
> > +__rte_experimental
> > +static inline dma_cookie_t
> > +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
> > +                  const struct dma_scatterlist *sg,
> > +                  uint32_t sg_len, uint64_t flags)
> 
> I would like to change this as:
> rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id, const struct
> rte_dma_sg *src, uint32_t nb_src,
> const struct rte_dma_sg *dst, uint32_t nb_dst) or so, to allow use cases like
> a 30 MB src copy being split across 30 x 1 MB dst segments.
> 
> 
> 
> > +{
> > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +       return (*dev->copy_sg)(dev, vq_id, sg, sg_len, flags);
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Enqueue a fill operation onto the DMA virt queue
> > + *
> > + * This queues up a fill operation to be performed by hardware, but does not
> > + * trigger hardware to begin that operation.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vq_id
> > + *   The identifier of virt queue.
> > + * @param pattern
> > + *   The pattern to populate the destination buffer with.
> > + * @param dst
> > + *   The address of the destination buffer.
> > + * @param length
> > + *   The length of the destination buffer.
> > + * @param flags
> > + *   An opaque flags for this operation.
> 
> PLEASE REMOVE opaque stuff from fastpath it will be a pain for
> application writers as
> they need to write multiple combinations of fastpath. flags are OK, if
> we have a valid
> generic flag now to control the transfer behavior.
> 

+1. Flags need to be explicitly listed. If we don't have any flags for now,
we can specify that the value must be given as zero and it's for future
use.
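i.e. something along these lines; the flag names are placeholders, not part
of the patch:

    /* hypothetical explicit flag definitions; all undefined bits must be
     * zero so they remain available for future use */
    #define RTE_DMA_OP_FLAG_FENCE  (1ULL << 0) /* fence before this operation */
    #define RTE_DMA_OP_FLAG_LLC    (1ULL << 1) /* hint: put dst in last-level cache */

    /* enqueue a copy that must not pass earlier operations */
    rte_dmadev_copy(dev_id, vq_id, src, dst, len, RTE_DMA_OP_FLAG_FENCE);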

> 
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Add a fence to force ordering between operations
> > + *
> > + * This adds a fence to a sequence of operations to enforce ordering, such that
> > + * all operations enqueued before the fence must be completed before operations
> > + * after the fence.
> > + * NOTE: Since this fence may be added as a flag to the last operation enqueued,
> > + * this API may not function correctly when called immediately after an
> > + * "rte_dmadev_perform" call i.e. before any new operations are enqueued.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vq_id
> > + *   The identifier of virt queue.
> > + *
> > + * @return
> > + *   - =0: Successful add fence.
> > + *   - <0: Failure to add fence.
> > + *
> > + * NOTE: The caller must ensure that the input parameter is valid and the
> > + *       corresponding device supports the operation.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
> > +{
> > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +       return (*dev->fence)(dev, vq_id);
> > +}
> 
> Since HW submission is in a queue (FIFO), the ordering is always
> maintained. Right?
> Could you share more details and use case of fence() from
> driver/application PoV?
> 

There are different kinds of ordering to consider, ordering of completions
and the ordering of operations. While jobs are reported as completed to the
user in order, for performance, hardware may overlap individual jobs within
a burst (or even across bursts). Therefore, we need a fence operation to
inform hardware that one job should not be started until the other has
fully completed.
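A sketch of the intended usage, with the prototypes from this patch: a copy
into a staging buffer that must fully land before being copied onward.

    static void
    ordered_two_stage_copy(uint16_t dev_id, uint16_t vq_id,
                           void *src, void *staging, void *dst, uint32_t len)
    {
            /* job 2 reads what job 1 writes, so hardware must not overlap them */
            rte_dmadev_copy(dev_id, vq_id, src, staging, len, 0); /* job 1 */
            rte_dmadev_fence(dev_id, vq_id);                      /* order point */
            rte_dmadev_copy(dev_id, vq_id, staging, dst, len, 0); /* job 2 */
            rte_dmadev_perform(dev_id, vq_id);                    /* one doorbell */
    }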

> 
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Trigger hardware to begin performing enqueued operations
> > + *
> > + * This API is used to write the "doorbell" to the hardware to trigger it
> > + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vq_id
> > + *   The identifier of virt queue.
> > + *
> > + * @return
> > + *   - =0: Successful trigger hardware.
> > + *   - <0: Failure to trigger hardware.
> > + *
> > + * NOTE: The caller must ensure that the input parameter is valid and the
> > + *       corresponding device supports the operation.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
> > +{
> > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +       return (*dev->perform)(dev, vq_id);
> > +}
> 
> Since we have additional function call overhead in all the
> applications for this scheme, I would like to understand
> the use of doing this way vs enq does the doorbell implicitly from
> driver/application PoV?
> 

In our benchmarks it's just faster. When we tested it, the overhead of the
function calls was noticeably less than the cost of building up the
parameter array(s) for passing the jobs in as a burst. [We don't see this
cost with things like NIC I/O since DPDK tends to already have the mbuf
fully populated before the TX call anyway.]
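The pattern being optimized for, as a sketch using this patch's API: per-op
enqueue calls with a single doorbell per burst, rather than assembling
parameter arrays up front.

    static uint16_t
    enqueue_copy_burst(uint16_t dev_id, uint16_t vq_id, void **src, void **dst,
                       uint32_t *len, uint16_t nb_ops)
    {
            uint16_t i;

            for (i = 0; i < nb_ops; i++)
                    if (rte_dmadev_copy(dev_id, vq_id, src[i], dst[i],
                                        len[i], 0) < 0)
                            break; /* queue full: submit what we have */
            if (i > 0)
                    rte_dmadev_perform(dev_id, vq_id); /* one doorbell per burst */
            return i;
    }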

> 
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Returns the number of operations that have been successful completed.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vq_id
> > + *   The identifier of virt queue.
> > + * @param nb_cpls
> > + *   The maximum number of completed operations that can be processed.
> > + * @param[out] cookie
> > + *   The last completed operation's cookie.
> > + * @param[out] has_error
> > + *   Indicates if there are transfer error.
> > + *
> > + * @return
> > + *   The number of operations that successful completed.
> 
> successfully
> 
> > + *
> > + * NOTE: The caller must ensure that the input parameter is valid and the
> > + *       corresponding device supports the operation.
> > + */
> > +__rte_experimental
> > +static inline uint16_t
> > +rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
> > +                    dma_cookie_t *cookie, bool *has_error)
> > +{
> > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +       has_error = false;
> > +       return (*dev->completed)(dev, vq_id, nb_cpls, cookie, has_error);
> 
> It may be better to have cookie/ring_idx as third argument.
> 

No strong opinions here, but having it as in the code above means all
input parameters come before all output, which makes sense to me.

> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Returns the number of operations that failed to complete.
> > + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vq_id
> > + *   The identifier of virt queue.
> > + * @param nb_status
> > + *   Indicates the size of status array.
> > + * @param[out] status
> > + *   The error code of operations that failed to complete.
> > + * @param[out] cookie
> > + *   The last failed completed operation's cookie.
> > + *
> > + * @return
> > + *   The number of operations that failed to complete.
> > + *
> > + * NOTE: The caller must ensure that the input parameter is valid and the
> > + *       corresponding device supports the operation.
> > + */
> > +__rte_experimental
> > +static inline uint16_t
> > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> > +                          const uint16_t nb_status, uint32_t *status,
> > +                          dma_cookie_t *cookie)
> 
> IMO, it is better to move cookie/ring_idx to position 3.
> Why would it return an array of errors, since it is called after
> rte_dmadev_completed() reports
> has_error? Is it better to change to
> 
> rte_dmadev_error_status(uint16_t dev_id, uint16_t vq_id, dma_cookie_t
> *cookie, uint32_t *status)
> 
> I also think we may need to set status as a bitmask and enumerate all
> the combinations of error codes
> of all the drivers, and return a string from the driver like the existing rte_flow_error
> 
> See
> struct rte_flow_error {
>         enum rte_flow_error_type type; /**< Cause field and error types. */
>         const void *cause; /**< Object responsible for the error. */
>         const char *message; /**< Human-readable error message. */
> };
> 

I think we need a multi-return value API here, as we may add operations in
future which have non-error status values to return. The obvious case is
DMA engines which support "compare" operations. In that case a successful
compare (as in there were no DMA or HW errors) can return "equal" or
"not-equal" as statuses. For general "copy" operations, the faster
completion op can be used to just return successful values (and only call
this status version on error), while apps using those compare ops or a
mixture of copy and compare ops, would always use the slower one that
returns status values for each and every op.

The ioat APIs used 32-bit integer values for this status array so as to
allow e.g. 16-bits for error code and 16-bits for future status values. For
most operations there should be a fairly small set of things that can go
wrong, i.e. bad source address, bad destination address or invalid length.
Within that we may have a couple of specifics for why an address is bad,
but even so I don't think we need to start having multiple bit
combinations.
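e.g. a hypothetical encoding of that 16/16 split (names illustrative only):

    /* 32-bit status layout: low 16 bits = error code, high 16 bits =
     * operation-specific result (e.g. a compare outcome) */
    #define RTE_DMA_STATUS_ERR(s)   ((uint16_t)((s) & 0xFFFF))
    #define RTE_DMA_STATUS_VAL(s)   ((uint16_t)((s) >> 16))

    #define RTE_DMA_ERR_OK           0
    #define RTE_DMA_ERR_BAD_SRC_ADDR 1
    #define RTE_DMA_ERR_BAD_DST_ADDR 2
    #define RTE_DMA_ERR_BAD_LENGTH   3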

> > +{
> > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +       return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
> > +}
> > +
> > +struct rte_dmadev_stats {
> > +       uint64_t enqueue_fail_count;
> > +       /**< Conut of all operations which failed enqueued */
> > +       uint64_t enqueued_count;
> > +       /**< Count of all operations which successful enqueued */
> > +       uint64_t completed_fail_count;
> > +       /**< Count of all operations which failed to complete */
> > +       uint64_t completed_count;
> > +       /**< Count of all operations which successful complete */
> > +};
> 
> We need to have a capability API to tell which items are
> updated/supported by the driver.
> 

I also would remove the enqueue fail counts, since they are better counted
by the app. If a driver reports 20,000 failures we have no way of knowing
if that is 20,000 unique operations which failed to enqueue or a single
operation which failed to enqueue 20,000 times but succeeded on attempt
20,001.
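An app-side retry loop, by contrast, knows the difference between one op
retried N times and N distinct failures (sketch using this patch's API; the
counter and drain size are the application's choice):

    static void
    copy_with_retry(uint16_t dev_id, uint16_t vq_id, void *src, void *dst,
                    uint32_t len, uint64_t *enq_retries)
    {
            dma_cookie_t cookie;
            bool has_error;

            while (rte_dmadev_copy(dev_id, vq_id, src, dst, len, 0) < 0) {
                    (*enq_retries)++; /* app knows: one op, retried N times */
                    /* drain completions to free descriptor slots, then retry */
                    rte_dmadev_completed(dev_id, vq_id, 16, &cookie, &has_error);
            }
    }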

> 
> > diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> > new file mode 100644
> > index 0000000..a3afea2
> > --- /dev/null
> > +++ b/lib/dmadev/rte_dmadev_core.h
> > @@ -0,0 +1,98 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright 2021 HiSilicon Limited.
> > + */
> > +
> > +#ifndef _RTE_DMADEV_CORE_H_
> > +#define _RTE_DMADEV_CORE_H_
> > +
> > +/**
> > + * @file
> > + *
> > + * RTE DMA Device internal header.
> > + *
> > + * This header contains internal data types. But they are still part of the
> > + * public API because they are used by inline public functions.
> > + */
> > +
> > +struct rte_dmadev;
> > +
> > +typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
> > +                                     void *src, void *dst,
> > +                                     uint32_t length, uint64_t flags);
> > +/**< @internal Function used to enqueue a copy operation. */
> 
> To avoid namespace conflict(as it is public API) use rte_
> 
> 
> > +
> > +/**
> > + * The data structure associated with each DMA device.
> > + */
> > +struct rte_dmadev {
> > +       /**< Enqueue a copy operation onto the DMA device. */
> > +       dmadev_copy_t copy;
> > +       /**< Enqueue a scatter list copy operation onto the DMA device. */
> > +       dmadev_copy_sg_t copy_sg;
> > +       /**< Enqueue a fill operation onto the DMA device. */
> > +       dmadev_fill_t fill;
> > +       /**< Enqueue a scatter list fill operation onto the DMA device. */
> > +       dmadev_fill_sg_t fill_sg;
> > +       /**< Add a fence to force ordering between operations. */
> > +       dmadev_fence_t fence;
> > +       /**< Trigger hardware to begin performing enqueued operations. */
> > +       dmadev_perform_t perform;
> > +       /**< Returns the number of operations that successful completed. */
> > +       dmadev_completed_t completed;
> > +       /**< Returns the number of operations that failed to complete. */
> > +       dmadev_completed_fails_t completed_fails;
> 
> We need to limit fastpath items in 1 CL
> 

I don't think that is going to be possible. I also would like to see
numbers to check if we benefit much from having these fastpath ops separate
from the regular ops.

> > +
> > +       void *dev_private; /**< PMD-specific private data */
> > +       const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD */
> > +
> > +       uint16_t dev_id; /**< Device ID for this instance */
> > +       int socket_id; /**< Socket ID where memory is allocated */
> > +       struct rte_device *device;
> > +       /**< Device info. supplied during device initialization */
> > +       const char *driver_name; /**< Driver info. supplied by probing */
> > +       char name[RTE_DMADEV_NAME_MAX_LEN]; /**< Device name */
> > +
> > +       RTE_STD_C11
> > +       uint8_t attached : 1; /**< Flag indicating the device is attached */
> > +       uint8_t started : 1; /**< Device state: STARTED(1)/STOPPED(0) */
> 
> Add a couple of reserved fields for future ABI stability.
> 
> > +
> > +} __rte_cache_aligned;
> > +
> > +extern struct rte_dmadev rte_dmadevices[];
> > +

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-05 10:52   ` Bruce Richardson
@ 2021-07-05 11:12     ` Morten Brørup
  2021-07-05 13:44       ` Bruce Richardson
  2021-07-05 15:55     ` Jerin Jacob
  2021-07-06  8:20     ` fengchengwen
  2 siblings, 1 reply; 339+ messages in thread
From: Morten Brørup @ 2021-07-05 11:12 UTC (permalink / raw)
  To: Bruce Richardson, Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Nipun Gupta, Hemant Agrawal, Maxime Coquelin,
	Honnappa Nagarahalli, David Marchand, Satananda Burla,
	Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Bruce Richardson
> Sent: Monday, 5 July 2021 12.53
> 
> On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng
> <fengchengwen@huawei.com> wrote:
> > >
> > > +
> > > +/**
> > > + * The data structure associated with each DMA device.
> > > + */
> > > +struct rte_dmadev {
> > > +       /**< Enqueue a copy operation onto the DMA device. */
> > > +       dmadev_copy_t copy;
> > > +       /**< Enqueue a scatter list copy operation onto the DMA
> device. */
> > > +       dmadev_copy_sg_t copy_sg;
> > > +       /**< Enqueue a fill operation onto the DMA device. */
> > > +       dmadev_fill_t fill;
> > > +       /**< Enqueue a scatter list fill operation onto the DMA
> device. */
> > > +       dmadev_fill_sg_t fill_sg;
> > > +       /**< Add a fence to force ordering between operations. */
> > > +       dmadev_fence_t fence;
> > > +       /**< Trigger hardware to begin performing enqueued
> operations. */
> > > +       dmadev_perform_t perform;
> > > +       /**< Returns the number of operations that successful
> completed. */
> > > +       dmadev_completed_t completed;
> > > +       /**< Returns the number of operations that failed to
> complete. */
> > > +       dmadev_completed_fails_t completed_fails;
> >
> > We need to limit fastpath items in 1 CL
> >
> 
> I don't think that is going to be possible. I also would like to see
> numbers to check if we benefit much from having these fastpath ops
> separate
> from the regular ops.

The fastpath ops may not fit into 1 cache line, but it is a good design practice to separate hot data from cold data, and I do consider the fastpath function pointers hot and configuration function pointers cold.

The important point is keeping all the fastpath ops (of a dmadevice) together and spanning as few cache lines as possible.

Configuration ops and other slow data may follow in the same structure; that should make no performance difference. It might make a difference for memory consumption if the other data are very large and not dynamically allocated, as we are discussing regarding ethdev.
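For the struct in this patch that property nearly holds already: the eight
fastpath pointers are 8 x 8 B = 64 B, exactly one cache line on 64-bit
systems, with the cold members starting on the next line (annotated sketch
of the layout from this patch):

    struct rte_dmadev {
            /* hot: fastpath ops, 8 pointers = 64 B = 1 cache line on 64-bit */
            dmadev_copy_t copy;
            dmadev_copy_sg_t copy_sg;
            dmadev_fill_t fill;
            dmadev_fill_sg_t fill_sg;
            dmadev_fence_t fence;
            dmadev_perform_t perform;
            dmadev_completed_t completed;
            dmadev_completed_fails_t completed_fails;

            /* cold: configuration and identification data follows */
            void *dev_private;
            const struct rte_dmadev_ops *dev_ops;
            uint16_t dev_id;
            int socket_id;
            struct rte_device *device;
            const char *driver_name;
            char name[RTE_DMADEV_NAME_MAX_LEN];
    } __rte_cache_aligned;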


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-05 11:12     ` Morten Brørup
@ 2021-07-05 13:44       ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-05 13:44 UTC (permalink / raw)
  To: Morten Brørup
  Cc: Jerin Jacob, Chengwen Feng, Thomas Monjalon, Ferruh Yigit,
	Jerin Jacob, dpdk-dev, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Mon, Jul 05, 2021 at 01:12:54PM +0200, Morten Brørup wrote:
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Bruce Richardson
> > Sent: Monday, 5 July 2021 12.53
> > 
> > On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> > > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng
> > <fengchengwen@huawei.com> wrote:
> > > >
> > > > +
> > > > +/**
> > > > + * The data structure associated with each DMA device.
> > > > + */
> > > > +struct rte_dmadev {
> > > > +       /**< Enqueue a copy operation onto the DMA device. */
> > > > +       dmadev_copy_t copy;
> > > > +       /**< Enqueue a scatter list copy operation onto the DMA
> > device. */
> > > > +       dmadev_copy_sg_t copy_sg;
> > > > +       /**< Enqueue a fill operation onto the DMA device. */
> > > > +       dmadev_fill_t fill;
> > > > +       /**< Enqueue a scatter list fill operation onto the DMA
> > device. */
> > > > +       dmadev_fill_sg_t fill_sg;
> > > > +       /**< Add a fence to force ordering between operations. */
> > > > +       dmadev_fence_t fence;
> > > > +       /**< Trigger hardware to begin performing enqueued
> > operations. */
> > > > +       dmadev_perform_t perform;
> > > > +       /**< Returns the number of operations that successful
> > completed. */
> > > > +       dmadev_completed_t completed;
> > > > +       /**< Returns the number of operations that failed to
> > complete. */
> > > > +       dmadev_completed_fails_t completed_fails;
> > >
> > > We need to limit fastpath items in 1 CL
> > >
> > 
> > I don't think that is going to be possible. I also would like to see
> > numbers to check if we benefit much from having these fastpath ops
> > separate
> > from the regular ops.
> 
> The fastpath ops may not fit into 1 cache line, but it is a good design practice to separate hot data from cold data, and I do consider the fastpath function pointers hot and configuration function pointers cold.
> 
> The important point is keeping all the fastpath ops (of a dmadevice) together and spanning as few cache lines as possible.
> 
> Configuration ops and other slow data may follow in the same structure; that should make no performance difference. It might make a difference for memory consumption if the other data are very large and not dynamically allocated, as we are discussing regarding ethdev.
> 

Yes, I agree if it can be done, it should be.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-05 10:52   ` Bruce Richardson
  2021-07-05 11:12     ` Morten Brørup
@ 2021-07-05 15:55     ` Jerin Jacob
  2021-07-05 17:16       ` Bruce Richardson
  2021-07-06  8:20     ` fengchengwen
  2 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-05 15:55 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Mon, Jul 5, 2021 at 4:22 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > >
> > > This patch introduces 'dmadevice' which is a generic type of DMA
> > > device.
> > >
> > > The APIs of dmadev library exposes some generic operations which can
> > > enable configuration and I/O with the DMA devices.
> > >
> > > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> >
> > Thanks for v1.
> >
> > I would suggest finalizing lib/dmadev/rte_dmadev.h before doing the
> > implementation so that you don't need
> > to waste time on reworking the implementation.
> >
>
> I actually like having the .c file available too. Before we lock down the
> .h file and the API, I want to verify the performance of our drivers with
> the implementation, and having a working .c file is obviously necessary for
> that. So I appreciate having it as part of the RFC.

Ack.

>
> > Comments inline.
> >
> > > ---
> <snip>
> > > + *
> > > + * The DMA framework is built on the following abstraction model:
> > > + *
> > > + *     ------------    ------------
> > > + *     |virt-queue|    |virt-queue|
> > > + *     ------------    ------------
> > > + *            \           /
> > > + *             \         /
> > > + *              \       /
> > > + *            ------------     ------------
> > > + *            | HW-queue |     | HW-queue |
> > > + *            ------------     ------------
> > > + *                   \            /
> > > + *                    \          /
> > > + *                     \        /
> > > + *                     ----------
> > > + *                     | dmadev |
> > > + *                     ----------
> >
> > Continuing the discussion with @Morten Brørup, I think we need to
> > finalize the model.
> >
>
> +1 and the terminology with regards to queues and channels. With our ioat
> hardware, each HW queue was called a channel for instance.

Looks like <dmadev> <> <channel> can cover all the use cases; if the
HW has more than one queue
it can be exposed as a separate dmadev device.


>
> > > + *   a) The DMA operation request must be submitted to the virt queue, virt
> > > + *      queues must be created based on HW queues, the DMA device could have
> > > + *      multiple HW queues.
> > > + *   b) The virt queues on the same HW-queue could represent different contexts,
> > > + *      e.g. user could create virt-queue-0 on HW-queue-0 for mem-to-mem
> > > + *      transfer scenario, and create virt-queue-1 on the same HW-queue for
> > > + *      mem-to-dev transfer scenario.
> > > + *   NOTE: user could also create multiple virt queues for mem-to-mem transfer
> > > + *         scenario as long as the corresponding driver supports.
> > > + *
> > > + * The control plane APIs include configure/queue_setup/queue_release/start/
> > > + * stop/reset/close, in order to start device work, the call sequence must be
> > > + * as follows:
> > > + *     - rte_dmadev_configure()
> > > + *     - rte_dmadev_queue_setup()
> > > + *     - rte_dmadev_start()
> >
> > Please add reconfigure behaviour etc. Please check the
> > lib/regexdev/rte_regexdev.h
> > introduction. I have added similar ones so you could reuse as much as possible.
> >
> >
> > > + * The dataplane APIs include two parts:
> > > + *   a) The first part is the submission of operation requests:
> > > + *        - rte_dmadev_copy()
> > > + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> > > + *        - rte_dmadev_fill()
> > > + *        - rte_dmadev_fill_sg() - scatter-gather form of fill
> > > + *        - rte_dmadev_fence()   - add a fence force ordering between operations
> > > + *        - rte_dmadev_perform() - issue doorbell to hardware
> > > + *      These APIs could work with different virt queues which have different
> > > + *      contexts.
> > > + *      The first four APIs are used to submit the operation request to the virt
> > > + *      queue, if the submission is successful, a cookie (as type
> > > + *      'dma_cookie_t') is returned, otherwise a negative number is returned.
> > > + *   b) The second part is to obtain the result of requests:
> > > + *        - rte_dmadev_completed()
> > > + *            - return the number of operation requests completed successfully.
> > > + *        - rte_dmadev_completed_fails()
> > > + *            - return the number of operation requests failed to complete.
> > > + *
> > > + * The misc APIs include info_get/queue_info_get/stats/xstats/selftest, provide
> > > + * information query and self-test capabilities.
> > > + *
> > > + * About the dataplane APIs MT-safe, there are two dimensions:
> > > + *   a) For one virt queue, the submit/completion API could be MT-safe,
> > > + *      e.g. one thread do submit operation, another thread do completion
> > > + *      operation.
> > > + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_VQ.
> > > + *      If driver don't support it, it's up to the application to guarantee
> > > + *      MT-safe.
> > > + *   b) For multiple virt queues on the same HW queue, e.g. one thread do
> > > + *      operation on virt-queue-0, another thread do operation on virt-queue-1.
> > > + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_MVQ.
> > > + *      If driver don't support it, it's up to the application to guarantee
> > > + *      MT-safe.
> >
> > From an application PoV it may not be good to write portable
> > applications. Please check
> > latest thread with @Morten Brørup
> >
> > > + */
> > > +
> > > +#ifdef __cplusplus
> > > +extern "C" {
> > > +#endif
> > > +
> > > +#include <rte_common.h>
> > > +#include <rte_memory.h>
> > > +#include <rte_errno.h>
> > > +#include <rte_compat.h>
> >
> > Sort in alphabetical order.
> >
> > > +
> > > +/**
> > > + * dma_cookie_t - an opaque DMA cookie
> >
> > Since we are defining the behaviour, it is not opaque any more.
> > I think, it is better to call ring_idx or so.
> >
>
> +1 for ring index. We don't need a separate type for it though, just
> document the index as an unsigned return value.
>
> >
> > > +#define RTE_DMA_DEV_CAPA_MT_MVQ (1ull << 11) /**< Support MT-safe of multiple virt queues */
> >
> > Please add lots of @see for all symbols where they are being used, so that
> > one can understand the full scope of the
> > symbols. See the example below.
> >
> > #define RTE_REGEXDEV_CAPA_RUNTIME_COMPILATION_F (1ULL << 0)
> > /**< RegEx device does support compiling the rules at runtime unlike
> >  * loading only the pre-built rule database using
> >  * struct rte_regexdev_config::rule_db in rte_regexdev_configure()
> >  *
> >  * @see struct rte_regexdev_config::rule_db, rte_regexdev_configure()
> >  * @see struct rte_regexdev_info::regexdev_capa
> >  */
> >
> > > + *
> > > + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's a error
> > > + * code.
> > > + * When using cookies, comply with the following rules:
> > > + * a) Cookies for each virtual queue are independent.
> > > + * b) For a virt queue, the cookie are monotonically incremented, when it reach
> > > + *    the INT_MAX, it wraps back to zero.
>
> I disagree with the INT_MAX (or INT32_MAX) value here. If we use that
> value, it means that we cannot use implicit wrap-around inside the CPU and
> have to check for the INT_MAX value. Better to:
> 1. Specify that it wraps at UINT16_MAX which allows us to just use a
> uint16_t internally and wrap-around automatically, or:
> 2. Specify that it wraps at a power-of-2 value >= UINT16_MAX, giving
> drivers the flexibility at what value to wrap around.

I think (2) is better than (1). Even better would be to wrap around at the
number of descriptors configured in dev_configure() (we can make this a power of 2).


>
> > > + * c) The initial cookie of a virt queue is zero, after the device is stopped or
> > > + *    reset, the virt queue's cookie needs to be reset to zero.
> > > + * Example:
> > > + *    step-1: start one dmadev
> > > + *    step-2: enqueue a copy operation, the cookie return is 0
> > > + *    step-3: enqueue a copy operation again, the cookie return is 1
> > > + *    ...
> > > + *    step-101: stop the dmadev
> > > + *    step-102: start the dmadev
> > > + *    step-103: enqueue a copy operation, the cookie return is 0
> > > + *    ...
> > > + */
> >
> > Good explanation.
> >
> > > +typedef int32_t dma_cookie_t;
> >
>
> As I mentioned before, I'd just remove this, and use regular int types,
> with "ring_idx" as the name.

+1

>
> >
> > > +
> > > +/**
> > > + * dma_scatterlist - can hold scatter DMA operation request
> > > + */
> > > +struct dma_scatterlist {
> >
> > I prefer to change scatterlist -> sg
> > i.e rte_dma_sg
> >
> > > +       void *src;
> > > +       void *dst;
> > > +       uint32_t length;
> > > +};
> > > +
> >
> > > +
> > > +/**
> > > + * A structure used to retrieve the contextual information of
> > > + * an DMA device
> > > + */
> > > +struct rte_dmadev_info {
> > > +       /**
> > > +        * Fields filled by framewok
> >
> > typo.
> >
> > > +        */
> > > +       struct rte_device *device; /**< Generic Device information */
> > > +       const char *driver_name; /**< Device driver name */
> > > +       int socket_id; /**< Socket ID where memory is allocated */
> > > +
> > > +       /**
> > > +        * Specification fields filled by driver
> > > +        */
> > > +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> > > +       uint16_t max_hw_queues; /**< Maximum number of HW queues. */
> > > +       uint16_t max_vqs_per_hw_queue;
> > > +       /**< Maximum number of virt queues to allocate per HW queue */
> > > +       uint16_t max_desc;
> > > +       /**< Maximum allowed number of virt queue descriptors */
> > > +       uint16_t min_desc;
> > > +       /**< Minimum allowed number of virt queue descriptors */
> >
> > Please add max_nb_segs. i.e maximum number of segments supported.
> >
> > > +
> > > +       /**
> > > +        * Status fields filled by driver
> > > +        */
> > > +       uint16_t nb_hw_queues; /**< Number of HW queues configured */
> > > +       uint16_t nb_vqs; /**< Number of virt queues configured */
> > > +};
> > > + i
> > > +
> > > +/**
> > > + * dma_address_type
> > > + */
> > > +enum dma_address_type {
> > > +       DMA_ADDRESS_TYPE_IOVA, /**< Use IOVA as dma address */
> > > +       DMA_ADDRESS_TYPE_VA, /**< Use VA as dma address */
> > > +};
> > > +
> > > +/**
> > > + * A structure used to configure a DMA device.
> > > + */
> > > +struct rte_dmadev_conf {
> > > +       enum dma_address_type addr_type; /**< Address type to used */
> >
> > I think, there are 3 kinds of limitations/capabilities.
> >
> > When the system is configured as IOVA as VA
> > 1) Device supports any VA address like memory from rte_malloc(),
> > rte_memzone(), malloc, stack memory
> > 2) Device support only VA address from rte_malloc(), rte_memzone() i.e
> > memory backed by hugepage and added to DMA map.
> >
> > When the system is configured as IOVA as PA
> > 1) Devices support only PA addresses .
> >
> > IMO, the above needs to be advertised as a capability and the application
> > needs to align with that;
> > I don't think the application requests the driver to work in any of the modes.
> >
> >
>
> I don't think we need this level of detail for addressing capabilities.
> Unless I'm missing something, the hardware should behave exactly as other
> hardware does taking in iova's.  If the user wants to check whether virtual
> addresses to pinned memory can be used directly, the user can call
> "rte_eal_iova_mode". We can't have a situation where some hardware uses one
> type of addresses and another hardware the other.
>
> Therefore, the only additional addressing capability we should need to
> report is that the hardware can use SVM/SVA and use virtual addresses not
> in hugepage memory.

+1.


>
> >
> > > +       uint16_t nb_hw_queues; /**< Number of HW-queues enable to use */
> > > +       uint16_t max_vqs; /**< Maximum number of virt queues to use */
> >
> > You need to say what the max value allowed is, etc., i.e. it is based on
> > info_get(), and mention the field
> > in the info structure.
> >
> >
> > > +
> > > +/**
> > > + * dma_transfer_direction
> > > + */
> > > +enum dma_transfer_direction {
> >
> > rte_dma_transter_direction
> >
> > > +       DMA_MEM_TO_MEM,
> > > +       DMA_MEM_TO_DEV,
> > > +       DMA_DEV_TO_MEM,
> > > +       DMA_DEV_TO_DEV,
> > > +};
> > > +
> > > +/**
> > > + * A structure used to configure a DMA virt queue.
> > > + */
> > > +struct rte_dmadev_queue_conf {
> > > +       enum dma_transfer_direction direction;
> >
> >
> > > +       /**< Associated transfer direction */
> > > +       uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> > > +       uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> > > +       uint64_t dev_flags; /**< Device specific flags */
> >
> > Use of this? Need more comments on this.
> > Since it is in the slowpath, we can have non-opaque names here based on
> > each driver's capability.
> >
> >
> > > +       void *dev_ctx; /**< Device specific context */
> >
> > Use of this? Need more comments on this.
> >
>
> I think this should be dropped. We should not have any opaque
> device-specific info in these structs, rather if a particular device needs
> parameters we should call them out. Drivers for which it's not relevant can
> ignore them (and report same in capability if necessary). Since this is not
> a dataplane API, we aren't concerned too much about perf and can size the
> struct appropriately.
>
> >
> > Please add some good amount of reserved bits and have API to init this
> > structure for future ABI stability, say rte_dmadev_queue_config_init()
> > or so.
> >
>
> I don't think that is necessary. Since the config struct is used only as
> parameter to the config function, any changes to it can be managed by
> versioning that single function. Padding would only be necessary if we had
> an array of these config structs somewhere.

OK.

For some reason, the versioning API looks ugly to me in code; keeping some
reserved fields together with an init function looks cleaner to me.

But I agree, function versioning works in this case. No need to find another
API if reserved fields are not general DPDK API practice.

In other libraries, I have seen such an _init function used for this, as
well as for filling in default values (in some cases the implementation's
default value is not zero),
so that the application can avoid a memset of the param structure.
Added rte_event_queue_default_conf_get() in eventdev spec for this.

No strong opinion on this.



>
> >
> > > +
> > > +/**
> > > + * A structure used to retrieve information of a DMA virt queue.
> > > + */
> > > +struct rte_dmadev_queue_info {
> > > +       enum dma_transfer_direction direction;
> >
> > A queue may support all directions so I think it should be a bitfield.
> >
> > > +       /**< Associated transfer direction */
> > > +       uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> > > +       uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> > > +       uint64_t dev_flags; /**< Device specific flags */
> > > +};
> > > +
> >
> > > +__rte_experimental
> > > +static inline dma_cookie_t
> > > +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
> > > +                  const struct dma_scatterlist *sg,
> > > +                  uint32_t sg_len, uint64_t flags)
> >
> > I would like to change this as:
> > rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id, const struct
> > rte_dma_sg *src, uint32_t nb_src,
> > const struct rte_dma_sg *dst, uint32_t nb_dst) or so allow the use case like
> > src 30 MB copy can be splitted as written as 1 MB x 30 dst.
> >
> >
> >
> > > +{
> > > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > > +       return (*dev->copy_sg)(dev, vq_id, sg, sg_len, flags);
> > > +}
> > > +
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > + *
> > > + * Enqueue a fill operation onto the DMA virt queue
> > > + *
> > > + * This queues up a fill operation to be performed by hardware, but does not
> > > + * trigger hardware to begin that operation.
> > > + *
> > > + * @param dev_id
> > > + *   The identifier of the device.
> > > + * @param vq_id
> > > + *   The identifier of virt queue.
> > > + * @param pattern
> > > + *   The pattern to populate the destination buffer with.
> > > + * @param dst
> > > + *   The address of the destination buffer.
> > > + * @param length
> > > + *   The length of the destination buffer.
> > > + * @param flags
> > > + *   An opaque flags for this operation.
> >
> > PLEASE REMOVE opaque stuff from fastpath it will be a pain for
> > application writers as
> > they need to write multiple combinations of fastpath. flags are OK, if
> > we have a valid
> > generic flag now to control the transfer behavior.
> >
>
> +1. Flags need to be explicitly listed. If we don't have any flags for now,
> we can specify that the value must be given as zero and it's for future
> use.

OK.

>
> >
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > + *
> > > + * Add a fence to force ordering between operations
> > > + *
> > > + * This adds a fence to a sequence of operations to enforce ordering, such that
> > > + * all operations enqueued before the fence must be completed before operations
> > > + * after the fence.
> > > + * NOTE: Since this fence may be added as a flag to the last operation enqueued,
> > > + * this API may not function correctly when called immediately after an
> > > + * "rte_dmadev_perform" call i.e. before any new operations are enqueued.
> > > + *
> > > + * @param dev_id
> > > + *   The identifier of the device.
> > > + * @param vq_id
> > > + *   The identifier of virt queue.
> > > + *
> > > + * @return
> > > + *   - =0: Successful add fence.
> > > + *   - <0: Failure to add fence.
> > > + *
> > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > + *       corresponding device supports the operation.
> > > + */
> > > +__rte_experimental
> > > +static inline int
> > > +rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
> > > +{
> > > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > > +       return (*dev->fence)(dev, vq_id);
> > > +}
> >
> > Since HW submission is in a queue(FIFO) the ordering is always
> > maintained. Right?
> > Could you share more details and use case of fence() from
> > driver/application PoV?
> >
>
> There are different kinds of ordering to consider, ordering of completions
> and the ordering of operations. While jobs are reported as completed to the
> user in order, for performance hardware, may overlap individual jobs within
> a burst (or even across bursts). Therefore, we need a fence operation to
> inform hardware that one job should not be started until the other has
> fully completed.

Got it. In order to save space in the first cache line for the fastpath
(saving 8B for the pointer) and to avoid
function call overhead, can we use one bit of the op functions' flags to
enable the fence?

>
> >
> > > +
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > + *
> > > + * Trigger hardware to begin performing enqueued operations
> > > + *
> > > + * This API is used to write the "doorbell" to the hardware to trigger it
> > > + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> > > + *
> > > + * @param dev_id
> > > + *   The identifier of the device.
> > > + * @param vq_id
> > > + *   The identifier of virt queue.
> > > + *
> > > + * @return
> > > + *   - =0: Successful trigger hardware.
> > > + *   - <0: Failure to trigger hardware.
> > > + *
> > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > + *       corresponding device supports the operation.
> > > + */
> > > +__rte_experimental
> > > +static inline int
> > > +rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
> > > +{
> > > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > > +       return (*dev->perform)(dev, vq_id);
> > > +}
> >
> > Since we have additional function call overhead in all the
> > applications for this scheme, I would like to understand
> > the use of doing this way vs enq does the doorbell implicitly from
> > driver/application PoV?
> >
>
> In our benchmarks it's just faster. When we tested it, the overhead of the
> function calls was noticeably less than the cost of building up the
> parameter array(s) for passing the jobs in as a burst. [We don't see this
> cost with things like NIC I/O since DPDK tends to already have the mbuf
> fully populated before the TX call anyway.]

OK. I agree regarding the cost of populating the parameter array on the stack.

My question was more about doing the doorbell update implicitly in enq. Is
the doorbell write costly on other HW compared to a function call? On our
HW, it is just a write of the number of submitted instructions to a register.

Also, if it is a separate function, we need to access the internal PMD
memory structures again to find where to write etc.


>
> >
> > > +
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > + *
> > > + * Returns the number of operations that have been successful completed.
> > > + *
> > > + * @param dev_id
> > > + *   The identifier of the device.
> > > + * @param vq_id
> > > + *   The identifier of virt queue.
> > > + * @param nb_cpls
> > > + *   The maximum number of completed operations that can be processed.
> > > + * @param[out] cookie
> > > + *   The last completed operation's cookie.
> > > + * @param[out] has_error
> > > + *   Indicates if there are transfer error.
> > > + *
> > > + * @return
> > > + *   The number of operations that successful completed.
> >
> > successfully
> >
> > > + *
> > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > + *       corresponding device supports the operation.
> > > + */
> > > +__rte_experimental
> > > +static inline uint16_t
> > > +rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
> > > +                    dma_cookie_t *cookie, bool *has_error)
> > > +{
> > > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > > +       has_error = false;
> > > +       return (*dev->completed)(dev, vq_id, nb_cpls, cookie, has_error);
> >
> > It may be better to have cookie/ring_idx as third argument.
> >
>
> No strong opinions here, but having it as in the code above means all
> input parameters come before all output, which makes sense to me.

+1

>
> > > +}
> > > +
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > + *
> > > + * Returns the number of operations that failed to complete.
> > > + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> > > + *
> > > + * @param dev_id
> > > + *   The identifier of the device.
> > > + * @param vq_id
> > > + *   The identifier of virt queue.
> > > + * @param nb_status
> > > + *   Indicates the size  of status array.
> > > + * @param[out] status
> > > + *   The error code of operations that failed to complete.
> > > + * @param[out] cookie
> > > + *   The last failed completed operation's cookie.
> > > + *
> > > + * @return
> > > + *   The number of operations that failed to complete.
> > > + *
> > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > + *       corresponding device supports the operation.
> > > + */
> > > +__rte_experimental
> > > +static inline uint16_t
> > > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> > > +                          const uint16_t nb_status, uint32_t *status,
> > > +                          dma_cookie_t *cookie)
> >
> > IMO, it is better to move cookie/ring_idx to position 3.
> > Why would it return an array of errors, since it is called after
> > rte_dmadev_completed() has set
> > has_error? Is it better to change
> >
> > rte_dmadev_error_status((uint16_t dev_id, uint16_t vq_id, dma_cookie_t
> > *cookie,  uint32_t *status)
> >
> > I also think we may need to make status a bitmask and enumerate all
> > the combinations of error codes
> > of all the drivers, and return a string from the driver like the existing rte_flow_error:
> >
> > See
> > struct rte_flow_error {
> >         enum rte_flow_error_type type; /**< Cause field and error types. */
> >         const void *cause; /**< Object responsible for the error. */
> >         const char *message; /**< Human-readable error message. */
> > };
> >
>
> I think we need a multi-return value API here, as we may add operations in
> future which have non-error status values to return. The obvious case is
> DMA engines which support "compare" operations. In that case a successful
> compare (as in there were no DMA or HW errors) can return "equal" or
> "not-equal" as statuses. For general "copy" operations, the faster
> completion op can be used to just return successful values (and only call
> this status version on error), while apps using those compare ops or a
> mixture of copy and compare ops, would always use the slower one that
> returns status values for each and every op..
>
> The ioat APIs used 32-bit integer values for this status array so as to
> allow e.g. 16-bits for error code and 16-bits for future status values. For
> most operations there should be a fairly small set of things that can go
> wrong, i.e. bad source address, bad destination address or invalid length.
> Within that we may have a couple of specifics for why an address is bad,
> but even so I don't think we need to start having multiple bit
> combinations.

OK. What is the purpose of the error status? Is it for the application to
print, or does the application need to take some action based on the specific error?

If the former is the scope, then we need to define standard enum values
for the errors, right?
i.e. uint32_t *status needs to change to enum rte_dma_error or so.



>
> > > +{
> > > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > > +       return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
> > > +}
> > > +
> > > +struct rte_dmadev_stats {
> > > +       uint64_t enqueue_fail_count;
> > > +       /**< Conut of all operations which failed enqueued */
> > > +       uint64_t enqueued_count;
> > > +       /**< Count of all operations which successful enqueued */
> > > +       uint64_t completed_fail_count;
> > > +       /**< Count of all operations which failed to complete */
> > > +       uint64_t completed_count;
> > > +       /**< Count of all operations which successful complete */
> > > +};
> >
> > We need to have capability API to tell which items are
> > updated/supported by the driver.
> >
>
> I also would remove the enqueue fail counts, since they are better counted
> by the app. If a driver reports 20,000 failures we have no way of knowing
> if that is 20,000 unique operations which failed to enqueue or a single
> operation which failed to enqueue 20,000 times but succeeded on attempt
> 20,001.
>
> >
> > > diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> > > new file mode 100644
> > > index 0000000..a3afea2
> > > --- /dev/null
> > > +++ b/lib/dmadev/rte_dmadev_core.h
> > > @@ -0,0 +1,98 @@
> > > +/* SPDX-License-Identifier: BSD-3-Clause
> > > + * Copyright 2021 HiSilicon Limited.
> > > + */
> > > +
> > > +#ifndef _RTE_DMADEV_CORE_H_
> > > +#define _RTE_DMADEV_CORE_H_
> > > +
> > > +/**
> > > + * @file
> > > + *
> > > + * RTE DMA Device internal header.
> > > + *
> > > + * This header contains internal data types. But they are still part of the
> > > + * public API because they are used by inline public functions.
> > > + */
> > > +
> > > +struct rte_dmadev;
> > > +
> > > +typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
> > > +                                     void *src, void *dst,
> > > +                                     uint32_t length, uint64_t flags);
> > > +/**< @internal Function used to enqueue a copy operation. */
> >
> > To avoid namespace conflict(as it is public API) use rte_
> >
> >
> > > +
> > > +/**
> > > + * The data structure associated with each DMA device.
> > > + */
> > > +struct rte_dmadev {
> > > +       /**< Enqueue a copy operation onto the DMA device. */
> > > +       dmadev_copy_t copy;
> > > +       /**< Enqueue a scatter list copy operation onto the DMA device. */
> > > +       dmadev_copy_sg_t copy_sg;
> > > +       /**< Enqueue a fill operation onto the DMA device. */
> > > +       dmadev_fill_t fill;
> > > +       /**< Enqueue a scatter list fill operation onto the DMA device. */
> > > +       dmadev_fill_sg_t fill_sg;
> > > +       /**< Add a fence to force ordering between operations. */
> > > +       dmadev_fence_t fence;
> > > +       /**< Trigger hardware to begin performing enqueued operations. */
> > > +       dmadev_perform_t perform;
> > > +       /**< Returns the number of operations that successful completed. */
> > > +       dmadev_completed_t completed;
> > > +       /**< Returns the number of operations that failed to complete. */
> > > +       dmadev_completed_fails_t completed_fails;
> >
> > We need to limit fastpath items in 1 CL
> >
>
> I don't think that is going to be possible. I also would like to see
> numbers to check if we benefit much from having these fastpath ops separate
> from the regular ops.
>
> > > +
> > > +       void *dev_private; /**< PMD-specific private data */
> > > +       const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD */
> > > +
> > > +       uint16_t dev_id; /**< Device ID for this instance */
> > > +       int socket_id; /**< Socket ID where memory is allocated */
> > > +       struct rte_device *device;
> > > +       /**< Device info. supplied during device initialization */
> > > +       const char *driver_name; /**< Driver info. supplied by probing */
> > > +       char name[RTE_DMADEV_NAME_MAX_LEN]; /**< Device name */
> > > +
> > > +       RTE_STD_C11
> > > +       uint8_t attached : 1; /**< Flag indicating the device is attached */
> > > +       uint8_t started : 1; /**< Device state: STARTED(1)/STOPPED(0) */
> >
> > Add a couple of reserved fields for future ABI stability.
> >
> > > +
> > > +} __rte_cache_aligned;
> > > +
> > > +extern struct rte_dmadev rte_dmadevices[];
> > > +

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-05 15:55     ` Jerin Jacob
@ 2021-07-05 17:16       ` Bruce Richardson
  2021-07-07  8:08         ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-05 17:16 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Mon, Jul 05, 2021 at 09:25:34PM +0530, Jerin Jacob wrote:
> 
> On Mon, Jul 5, 2021 at 4:22 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> > > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > >
> > > > This patch introduces 'dmadevice' which is a generic type of DMA
> > > > device.
<snip>
> >
> > +1 and the terminology with regards to queues and channels. With our ioat
> > hardware, each HW queue was called a channel for instance.
> 
> Looks like <dmadev> <> <channel> can cover all the use cases; if the
> HW has more than one queue
> it can be exposed as a separate dmadev device.
> 

Fine for me.

However, just to confirm: Morten's suggestion of using a
(device-specific void *) channel pointer rather than a dev_id + channel_id
pair of parameters won't work for you? You can't store a pointer or dev
index in the channel struct in the driver?

> 
<snip>
> > > > + *
> > > > + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's a error
> > > > + * code.
> > > > + * When using cookies, comply with the following rules:
> > > > + * a) Cookies for each virtual queue are independent.
> > > > + * b) For a virt queue, the cookie are monotonically incremented, when it reach
> > > > + *    the INT_MAX, it wraps back to zero.
> >
> > I disagree with the INT_MAX (or INT32_MAX) value here. If we use that
> > value, it means that we cannot use implicit wrap-around inside the CPU and
> > have to check for the INT_MAX value. Better to:
> > 1. Specify that it wraps at UINT16_MAX which allows us to just use a
> > uint16_t internally and wrap-around automatically, or:
> > 2. Specify that it wraps at a power-of-2 value >= UINT16_MAX, giving
> > drivers the flexibility at what value to wrap around.
> 
> I think (2) is better than (1). Even better would be to wrap around at the
> number of descriptors configured in dev_configure() (we can make this a power of 2).
> 

Interesting, I hadn't really considered that before. My only concern
would be if an app wants to keep values in the app ring for a while after
they have been returned from dmadev. I thought it easier to have the full
16-bit counter value returned to the user to give the most flexibility,
given that going from that to any power-of-2 ring size smaller is a trivial
operation.

Overall, while my ideal situation is to always have a 0..UINT16_MAX return
value from the function, I can live with your suggestion of wrapping at
ring_size, since drivers will likely do that internally anyway.
I think wrapping at INT32_MAX is too awkward and will be error prone since
we can't rely on hardware automatically wrapping to zero, nor on the driver
having pre-masked the value.
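
As a minimal sketch of option 2 (names illustrative, not from the patch): a
free-running 16-bit counter wraps implicitly in C's unsigned arithmetic, and
masking down to any power-of-2 ring size is a single AND:

	/* ring_idx is a free-running uint16_t: 65535 + 1 wraps to 0 */
	static inline uint16_t
	ring_slot(uint16_t ring_idx, uint16_t nb_desc)
	{
		/* nb_desc must be a power of 2 */
		return ring_idx & (nb_desc - 1);
	}

Wrapping at INT32_MAX instead would require an explicit compare-and-reset on
every increment.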

> >
> > > > + * c) The initial cookie of a virt queue is zero, after the device is stopped or
> > > > + *    reset, the virt queue's cookie needs to be reset to zero.
<snip>
> > >
> > > Please add some good amount of reserved bits and have API to init this
> > > structure for future ABI stability, say rte_dmadev_queue_config_init()
> > > or so.
> > >
> >
> > I don't think that is necessary. Since the config struct is used only as
> > parameter to the config function, any changes to it can be managed by
> > versioning that single function. Padding would only be necessary if we had
> > an array of these config structs somewhere.
> 
> OK.
> 
> For some reason, the versioning API looks ugly to me in code; keeping some
> reserved fields together with an init function looks cleaner to me.
> 
> But I agree, function versioning works in this case. No need to find another
> API if reserved fields are not general DPDK API practice.
> 

The one thing I would suggest instead of the padding is for the internal
APIs, to pass the struct size through, since we can't version those - and
for padding we can't know whether any replaced padding should be used or
not. Specifically:

	typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev, struct
			rte_dmadev_conf *cfg, size_t cfg_size);

but for the public function:

	int
	rte_dmadev_configure(struct rte_dmadev *dev, struct
			rte_dmadev_conf *cfg)
	{
		...
		ret = dev->ops.configure(dev, cfg, sizeof(*cfg));
		...
	}

Then if we change the structure and version the config API, the driver can
tell from the size what struct version it is and act accordingly. Without
that, each time the struct changed, we'd have to add a new function pointer
to the device ops.
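
On the driver side, a hypothetical handler can then gate access to fields
appended in a later ABI version on the size it receives (the new_opt field
and helper names below are invented purely for illustration):

	static int
	my_pmd_configure(struct rte_dmadev *dev,
			 struct rte_dmadev_conf *cfg, size_t cfg_size)
	{
		/* fields from the original struct are always present */
		setup_hw_queues(dev, cfg->nb_hw_queues);

		/* an old binary passes the old, smaller struct size, so
		 * only read appended fields when the caller's struct is
		 * big enough to contain them */
		if (cfg_size >= sizeof(struct rte_dmadev_conf))
			apply_new_opt(dev, cfg->new_opt);

		return 0;
	}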

> In other libraries, I have seen such an _init function used for this, as
> well as for filling in default values (in some cases the implementation's
> default value is not zero),
> so that the application can avoid a memset of the param structure.
> Added rte_event_queue_default_conf_get() in eventdev spec for this.
> 

I think that would largely have the same issues, unless it returned a
pointer to data inside the driver - and which therefore could not be
modified. Alternatively it would mean that the memory would have been
allocated in the driver and we would need to ensure proper cleanup
functions were called to free memory afterwards. Supporting having the
config parameter as a local variable I think makes things a lot easier.

> No strong opinion on this.
> 
> 
> 
> >
> > >
> > > > +
> > > > +/**
> > > > + * A structure used to retrieve information of a DMA virt queue.
> > > > + */
> > > > +struct rte_dmadev_queue_info {
> > > > +       enum dma_transfer_direction direction;
> > >
> > > A queue may support all directions so I think it should be a bitfield.
> > >
> > > > +       /**< Associated transfer direction */
> > > > +       uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> > > > +       uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> > > > +       uint64_t dev_flags; /**< Device specific flags */
> > > > +};
> > > > +
> > >
> > > > +__rte_experimental
> > > > +static inline dma_cookie_t
> > > > +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
> > > > +                  const struct dma_scatterlist *sg,
> > > > +                  uint32_t sg_len, uint64_t flags)
> > >
> > > I would like to change this as:
> > > rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id, const struct
> > > rte_dma_sg *src, uint32_t nb_src,
> > > const struct rte_dma_sg *dst, uint32_t nb_dst) or so allow the use case like
> > > src 30 MB copy can be splitted as written as 1 MB x 30 dst.
> > >

Out of interest, do you see much benefit (and in what way) from having the
scatter-gather support? Unlike sending 5 buffers in one packet rather than
5 buffers in 5 packets to a NIC, copying an array of memory in one op vs
multiple is functionally identical.

> > >
> > >
<snip>
> Got it. In order to save space in the first cache line for the fastpath
> (saving 8B for the pointer) and to avoid
> function call overhead, can we use one bit of the op functions' flags to
> enable the fence?
> 

The original ioat implementation did exactly that. However, I then
discovered that because a fence logically belongs between two operations,
does the fence flag on an operation mean "don't do any jobs after this
until this job has completed" or does it mean "don't start this job until
all previous jobs have completed". [Or theoretically does it mean both :-)]
Naturally, some hardware does it the former way (i.e. fence flag goes on
last op before fence), while other hardware the latter way (i.e. fence flag
goes on first op after the fence). Therefore, since fencing is about
ordering *between* two (sets of) jobs, I decided that it should do exactly
that and go between two jobs, so there is no ambiguity!

However, I'm happy enough to switch to having a fence flag, but I think if
we do that, it should be put in the "first job after fence" case, because
it is always easier to modify a previously written job if we need to, than
to save the flag for a future one.

Alternatively, if we keep the fence as a separate function, I'm happy
enough for it not to be on the same cacheline as the "hot" operations,
since fencing will always introduce a small penalty anyway.
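
In code, the two styles under discussion look as follows (the
RTE_DMA_OP_FLAG_FENCE name is hypothetical, not part of the RFC):

	/* fence as a separate call, sitting between the two jobs */
	rte_dmadev_copy(dev_id, vq_id, src_a, dst_a, len, 0);
	rte_dmadev_fence(dev_id, vq_id);
	rte_dmadev_copy(dev_id, vq_id, dst_a, dst_b, len, 0);

	/* fence as a flag on the first job after the fence point */
	rte_dmadev_copy(dev_id, vq_id, src_a, dst_a, len, 0);
	rte_dmadev_copy(dev_id, vq_id, dst_a, dst_b, len,
			RTE_DMA_OP_FLAG_FENCE);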

> >
> > >
<snip>
> > > Since we have additional function call overhead in all the
> > > applications for this scheme, I would like to understand
> > > the use of doing this way vs enq does the doorbell implicitly from
> > > driver/application PoV?
> > >
> >
> > In our benchmarks it's just faster. When we tested it, the overhead of the
> > function calls was noticeably less than the cost of building up the
> > parameter array(s) for passing the jobs in as a burst. [We don't see this
> > cost with things like NIC I/O since DPDK tends to already have the mbuf
> > fully populated before the TX call anyway.]
> 
> OK. I agree regarding the cost of populating the parameter array on the stack.
> 
> My question was more about doing the doorbell update implicitly in enq. Is
> the doorbell write costly on other HW compared to a function call? On our
> HW, it is just a write of the number of submitted instructions to a register.
> 
> Also, if it is a separate function, we need to access the internal PMD
> memory structures again to find where to write etc.
> 

The cost varies depending on a number of factors - even writing to a single
HW register can be very slow if that register is mapped as device
(uncacheable) memory, since (AFAIK) it will act as a full fence and wait
for the write to go all the way to hardware. For more modern HW, the cost
can be lighter. However, any cost of HW writes is going to be the same
whether its a separate function call or not.

However, the main thing about the doorbell update is that it's a
once-per-burst thing, rather than a once-per-job. Therefore, even if you
have to re-read the struct memory (which is likely still somewhere in your
cores' cache), any extra small cost of doing so is to be amortized over the
cost of a whole burst of copies.
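
That is, the usage model in this RFC is roughly the following (a minimal
sketch; the src/dst/len arrays are the application's own bookkeeping):

	uint16_t i;

	for (i = 0; i < burst_size; i++)
		rte_dmadev_copy(dev_id, vq_id, src[i], dst[i], len[i], 0);
	rte_dmadev_perform(dev_id, vq_id);	/* one doorbell per burst */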

> 
> >
> > >
<snip>
> > > > +
> > > > +/**
> > > > + * @warning
> > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > + *
> > > > + * Returns the number of operations that failed to complete.
> > > > + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> > > > + *
> > > > + * @param dev_id
> > > > + *   The identifier of the device.
> > > > + * @param vq_id
> > > > + *   The identifier of virt queue.
> > > > + * @param nb_status
> > > > + *   Indicates the size  of status array.
> > > > + * @param[out] status
> > > > + *   The error code of operations that failed to complete.
> > > > + * @param[out] cookie
> > > > + *   The last failed completed operation's cookie.
> > > > + *
> > > > + * @return
> > > > + *   The number of operations that failed to complete.
> > > > + *
> > > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > > + *       corresponding device supports the operation.
> > > > + */
> > > > +__rte_experimental
> > > > +static inline uint16_t
> > > > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> > > > +                          const uint16_t nb_status, uint32_t *status,
> > > > +                          dma_cookie_t *cookie)
> > >
> > > IMO, it is better to move cookie/ring_idx to position 3.
> > > Why would it return an array of errors, since it is called after
> > > rte_dmadev_completed() has set
> > > has_error? Is it better to change
> > >
> > > rte_dmadev_error_status((uint16_t dev_id, uint16_t vq_id, dma_cookie_t
> > > *cookie,  uint32_t *status)
> > >
> > > I also think we may need to make status a bitmask and enumerate all
> > > the combinations of error codes
> > > of all the drivers, and return a string from the driver like the existing rte_flow_error:
> > >
> > > See
> > > struct rte_flow_error {
> > >         enum rte_flow_error_type type; /**< Cause field and error types. */
> > >         const void *cause; /**< Object responsible for the error. */
> > >         const char *message; /**< Human-readable error message. */
> > > };
> > >
> >
> > I think we need a multi-return value API here, as we may add operations in
> > future which have non-error status values to return. The obvious case is
> > DMA engines which support "compare" operations. In that case a successful
> > compare (as in there were no DMA or HW errors) can return "equal" or
> > "not-equal" as statuses. For general "copy" operations, the faster
> > completion op can be used to just return successful values (and only call
> > this status version on error), while apps using those compare ops or a
> > mixture of copy and compare ops, would always use the slower one that
> > returns status values for each and every op..
> >
> > The ioat APIs used 32-bit integer values for this status array so as to
> > allow e.g. 16-bits for error code and 16-bits for future status values. For
> > most operations there should be a fairly small set of things that can go
> > wrong, i.e. bad source address, bad destination address or invalid length.
> > Within that we may have a couple of specifics for why an address is bad,
> > but even so I don't think we need to start having multiple bit
> > combinations.
> 
> OK. What is the purpose of the error status? Is it for the application to
> print, or does the application need to take some action based on the specific error?

It's largely for information purposes, but in the case of SVA/SVM errors
could occur due to the memory not being pinned, i.e. a page fault, in some
cases. If that happens, then it's up to the app to either touch the memory and
retry the copy, or to do a SW memcpy as a fallback.

In other error cases, I think it's good to tell the application if it's
passing around bad data, or data that is beyond the scope of hardware, e.g.
a copy that is beyond what can be done in a single transaction for a HW
instance. Given that there are always things that can go wrong, I think we
need some error reporting mechanism.
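
A sketch of the recovery flow described above, using the RFC's names (BURST
is an application-chosen constant, not from the patch):

	bool has_error = false;
	dma_cookie_t cookie;
	uint32_t status[BURST];
	uint16_t n;

	n = rte_dmadev_completed(dev_id, vq_id, BURST, &cookie, &has_error);
	/* ... process the n successfully completed jobs ... */
	if (has_error) {
		n = rte_dmadev_completed_fails(dev_id, vq_id, BURST,
					       status, &cookie);
		/* e.g. for a page-fault style error: touch the memory
		 * and resubmit, or fall back to a SW memcpy */
	}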

> If the former is the scope, then we need to define standard enum values
> for the errors, right?
> i.e. uint32_t *status needs to change to enum rte_dma_error or so.
> 
Sure. Perhaps an error/status structure is an option, where we explicitly
separate error info from status info.

> 
> 
<snip to end>

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-04  9:30 ` Jerin Jacob
  2021-07-05 10:52   ` Bruce Richardson
@ 2021-07-06  3:01   ` fengchengwen
  2021-07-06 10:01     ` Bruce Richardson
  1 sibling, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-06  3:01 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

Many thanks, mostly OK, and a few comments inline

On 2021/7/4 17:30, Jerin Jacob wrote:
> On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>>
>> This patch introduces 'dmadevice' which is a generic type of DMA
>> device.
...
>> +#include <rte_compat.h>
> 
> Sort in alphabetical order.
> 
>> +
>> +/**
>> + * dma_cookie_t - an opaque DMA cookie
> 
> Since we are defining the behaviour, it is not opaque any more.
> I think, it is better to call ring_idx or so.
> 


This type is designed to have two meanings: return <0 on failure and return >=0 on success.

How about the following definition:
    typedef int dma_ring_index_t;

If >= 0, its value range is [0, 65535] = uint16_t, so the driver implementation will be simple.
If < 0, it means enqueue failure.

The driver could hold a uint16_t ring_index; if the enqueue fails, just return failure, else return
the current ring_index and update it by: ring_index++;
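
A sketch of that driver-side scheme (the queue structure and enqueue
internals below are hypothetical):

	typedef int dma_ring_index_t; /* <0: error, >=0: index in [0, 65535] */

	struct driver_vq {
		uint16_t ring_index;	/* free-running, wraps 65535 -> 0 */
		/* ... */
	};

	static dma_ring_index_t
	vq_enqueue_copy(struct driver_vq *vq)
	{
		if (vq_full(vq))
			return -ENOSPC;		/* errno-style enqueue failure */
		/* ... write the descriptor ... */
		return vq->ring_index++;	/* current index, then advance */
	}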

>> +
>> +/**
>> + * A structure used to retrieve the contextual information of
>> + * an DMA device
>> + */
>> +struct rte_dmadev_info {
>> +       /**
>> +        * Fields filled by framewok
> 
> typo.
> 
>> +        */
>> +       struct rte_device *device; /**< Generic Device information */
>> +       const char *driver_name; /**< Device driver name */
>> +       int socket_id; /**< Socket ID where memory is allocated */
>> +
>> +       /**
>> +        * Specification fields filled by driver
>> +        */
>> +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
>> +       uint16_t max_hw_queues; /**< Maximum number of HW queues. */
>> +       uint16_t max_vqs_per_hw_queue;
>> +       /**< Maximum number of virt queues to allocate per HW queue */
>> +       uint16_t max_desc;
>> +       /**< Maximum allowed number of virt queue descriptors */
>> +       uint16_t min_desc;
>> +       /**< Minimum allowed number of virt queue descriptors */
> 
> Please add max_nb_segs. i.e maximum number of segments supported.

Do you mean something like "burst_size"?

> 
>> +
>> +       /**
>> +        * Status fields filled by driver
>> +        */
>> +       uint16_t nb_hw_queues; /**< Number of HW queues configured */
>> +       uint16_t nb_vqs; /**< Number of virt queues configured */
>> +};
>> + i
>> +
>> +/**
>> + * dma_address_type
>> + */
>> +enum dma_address_type {
>> +       DMA_ADDRESS_TYPE_IOVA, /**< Use IOVA as dma address */
>> +       DMA_ADDRESS_TYPE_VA, /**< Use VA as dma address */
>> +};
>> +
>> +/**
>> + * A structure used to configure a DMA device.
>> + */
>> +struct rte_dmadev_conf {
>> +       enum dma_address_type addr_type; /**< Address type to used */
> 
> I think, there are 3 kinds of limitations/capabilities.
> 
> When the system is configured as IOVA as VA
> 1) Device supports any VA address like memory from rte_malloc(),
> rte_memzone(), malloc, stack memory
> 2) Device support only VA address from rte_malloc(), rte_memzone() i.e
> memory backed by hugepage and added to DMA map.
> 
> When the system is configured as IOVA as PA
> 1) Devices support only PA addresses .
> 
> IMO, the above needs to be advertised as a capability and the application
> needs to align with that;
> I don't think the application requests the driver to work in any of the modes.
> 

OK, let's put together our ideas on address types:

There are three modes, which we may define as:
	IOVA_as_VA-ALL     ---for devices which may need to support the SVA feature
	                   ---may also be a CPU memcpy 'device'
	IOVA_as_VA         ---for devices which need IOMMU support
	IOVA_as_PA

There are many combinations of modes which a device may support: e.g. some
devices may only support IOVA_as_PA, some may only support IOVA_as_VA, and
some support both. The specific runtime type is determined by the vfio mode
and the driver capability (e.g. RTE_PCI_DRV_NEED_IOVA_AS_VA).

So we already define two capabilities for this:
	#define RTE_DMA_DEV_CAPA_IOVA	(1ull << 8) /**< Support IOVA as DMA address */
					---this covers IOVA_as_VA and IOVA_as_PA
	#define RTE_DMA_DEV_CAPA_VA	(1ull << 9) /**< Support VA as DMA address */
					---this covers IOVA_as_VA-ALL
for a device which doesn't support SVA:
	only declare RTE_DMA_DEV_CAPA_IOVA
for a device which supports SVA:
	declare RTE_DMA_DEV_CAPA_IOVA
	declare RTE_DMA_DEV_CAPA_VA (only when the IOMMU is enabled and the 'SVA flag' was set)
for a CPU memcpy device:
	only declare RTE_DMA_DEV_CAPA_VA

As the application:
- if RTE_DMA_DEV_CAPA_VA is supported, it may pass any VA address to the DMA,
- else if RTE_DMA_DEV_CAPA_IOVA is supported, it should pass IOVA addresses to the DMA,
- else the DMA device should not exist.
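
As a sketch, the application-side check then becomes (assuming the RFC's
info_get API and the dev_capa field of struct rte_dmadev_info; the exact
getter name is an assumption):

	struct rte_dmadev_info info;

	rte_dmadev_info_get(dev_id, &info);
	if (info.dev_capa & RTE_DMA_DEV_CAPA_VA) {
		/* any VA may be passed: rte_malloc(), heap, stack ... */
	} else if (info.dev_capa & RTE_DMA_DEV_CAPA_IOVA) {
		/* pass IOVAs only, e.g. hugepage-backed memory in the
		 * DMA map */
	}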

> 
>> +__rte_experimental
>> +static inline dma_cookie_t
>> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
>> +                  const struct dma_scatterlist *sg,
>> +                  uint32_t sg_len, uint64_t flags)
> 
> I would like to change this as:
> rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id, const struct
> rte_dma_sg *src, uint32_t nb_src,
> const struct rte_dma_sg *dst, uint32_t nb_dst) or so allow the use case like
> src 30 MB copy can be splitted as written as 1 MB x 30 dst.
> 

There are already too many arguments, and the above use case could be split into 30 sg items.

>> +__rte_experimental
>> +static inline int
>> +rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
>> +{
>> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +       return (*dev->fence)(dev, vq_id);
>> +}
> 
> Since HW submission is in a queue(FIFO) the ordering is always
> maintained. Right?
> Could you share more details and use case of fence() from
> driver/application PoV?
> 

For Kunpeng DMA, the hardware supports parallel execution of requests in the same queue.

This matters in scenarios where communication with a remote end is involved: the driver
should ensure the 'doorbell' is issued only after the data has been fully written.

> 
>> +
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Trigger hardware to begin performing enqueued operations
>> + *
>> + * This API is used to write the "doorbell" to the hardware to trigger it
>> + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
>> + *
>> + * @param dev_id
>> + *   The identifier of the device.
>> + * @param vq_id
>> + *   The identifier of virt queue.
>> + *
>> + * @return
>> + *   - =0: Successful trigger hardware.
>> + *   - <0: Failure to trigger hardware.
>> + *
>> + * NOTE: The caller must ensure that the input parameter is valid and the
>> + *       corresponding device supports the operation.
>> + */
>> +__rte_experimental
>> +static inline int
>> +rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
>> +{
>> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +       return (*dev->perform)(dev, vq_id);
>> +}
> 
> Since we have additional function call overhead in all the
> applications for this scheme, I would like to understand
> the use of doing this way vs enq does the doorbell implicitly from
> driver/application PoV?
> 

Because we split the burst operation into multiple substeps: for each enq we
don't issue the 'doorbell', and at the end we call perform() to issue the 'doorbell'.

On the ARM platform, memory-barrier ops must be issued along with the 'doorbell';
if we issued them on every enq, it could lead to significant performance degradation.
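
In other words, the barrier cost is paid once per burst inside perform()
rather than once per enqueue. A sketch (driver internals hypothetical;
rte_io_wmb() is DPDK's existing I/O write barrier):

	static int
	vq_perform(struct driver_vq *vq)
	{
		/* make all descriptor writes visible to the device ... */
		rte_io_wmb();
		/* ... before ringing the doorbell */
		*vq->doorbell_reg = vq->tail;
		return 0;
	}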

>> +__rte_experimental
>> +static inline uint16_t
>> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
>> +                          const uint16_t nb_status, uint32_t *status,
>> +                          dma_cookie_t *cookie)
> 
> IMO, it is better to move cookie/ring_idx to position 3.
> Why would it return an array of errors, since it is called after
> rte_dmadev_completed() has set
> has_error? Is it better to change
> 
> rte_dmadev_error_status((uint16_t dev_id, uint16_t vq_id, dma_cookie_t
> *cookie,  uint32_t *status)
> 
> I also think we may need to make status a bitmask and enumerate all
> the combinations of error codes
> of all the drivers, and return a string from the driver like the existing rte_flow_error
> 

A bitmask is limited to at most 32 values (or 64 if we extend it), and rte_flow_error is
heavyweight.

Considering that errors occur in only a small number of scenarios, it's OK to
pass a status array; since each status is 32 bits, it can denote a very large
number of error codes.

>> +
>> +struct rte_dmadev_stats {
>> +       uint64_t enqueue_fail_count;
>> +       /**< Conut of all operations which failed enqueued */
>> +       uint64_t enqueued_count;
>> +       /**< Count of all operations which successful enqueued */
>> +       uint64_t completed_fail_count;
>> +       /**< Count of all operations which failed to complete */
>> +       uint64_t completed_count;
>> +       /**< Count of all operations which successful complete */
>> +};
> 
> We need to have capability API to tell which items are
> updated/supported by the driver.
> 

There are only a few fields, and I don't think it's necessary to add a capability API:
drivers which don't support stats can simply not implement the callback,
and for those which do, these fields are the minimum set.

> 
>> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
>> new file mode 100644
>> index 0000000..a3afea2
>> --- /dev/null
>> +++ b/lib/dmadev/rte_dmadev_core.h
>> @@ -0,0 +1,98 @@
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright 2021 HiSilicon Limited.
>> + */
>> +
>> +#ifndef _RTE_DMADEV_CORE_H_
>> +#define _RTE_DMADEV_CORE_H_
>> +
>> +/**
>> + * @file
>> + *
>> + * RTE DMA Device internal header.
>> + *
>> + * This header contains internal data types. But they are still part of the
>> + * public API because they are used by inline public functions.
>> + */
>> +
>> +struct rte_dmadev;
>> +
>> +typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
>> +                                     void *src, void *dst,
>> +                                     uint32_t length, uint64_t flags);
>> +/**< @internal Function used to enqueue a copy operation. */
> 
> To avoid namespace conflict(as it is public API) use rte_

These are internal functions used by drivers, not applications,
and eth/regexdev_core are also defined without the rte_ prefix.

So I think it should remain as it is.

> 
> 
>> +
>> +/**
>> + * The data structure associated with each DMA device.
>> + */
>> +struct rte_dmadev {
>> +       /**< Enqueue a copy operation onto the DMA device. */
>> +       dmadev_copy_t copy;
>> +       /**< Enqueue a scatter list copy operation onto the DMA device. */
>> +       dmadev_copy_sg_t copy_sg;
>> +       /**< Enqueue a fill operation onto the DMA device. */
>> +       dmadev_fill_t fill;
>> +       /**< Enqueue a scatter list fill operation onto the DMA device. */
>> +       dmadev_fill_sg_t fill_sg;
>> +       /**< Add a fence to force ordering between operations. */
>> +       dmadev_fence_t fence;
>> +       /**< Trigger hardware to begin performing enqueued operations. */
>> +       dmadev_perform_t perform;
>> +       /**< Returns the number of operations that successful completed. */
>> +       dmadev_completed_t completed;
>> +       /**< Returns the number of operations that failed to complete. */
>> +       dmadev_completed_fails_t completed_fails;
> 
> We need to limit fastpath items in 1 CL

yes, currently there are 8 callbacks, which exactly fill one cache line.




^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-04 14:57 ` Andrew Rybchenko
@ 2021-07-06  3:56   ` fengchengwen
  2021-07-06 10:02     ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-06  3:56 UTC (permalink / raw)
  To: Andrew Rybchenko, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

Many thanks, mostly OK, a few comments inline

On 2021/7/4 22:57, Andrew Rybchenko wrote:
> On 7/2/21 4:18 PM, Chengwen Feng wrote:
>> This patch introduces 'dmadevice' which is a generic type of DMA
>> device.

[snip]

>> +#ifndef _RTE_DMADEV_CORE_H_
>> +#define _RTE_DMADEV_CORE_H_
>> +
>> +/**
>> + * @file
>> + *
>> + * RTE DMA Device internal header.
>> + *
>> + * This header contains internal data types. But they are still part of the
>> + * public API because they are used by inline public functions.
> 
> Do we really want it? Anyway rte_dmadev must not be here.
> Some sub-structure could be, but not entire rte_dmadev.
> 

struct rte_dmadev should be exposed publicly for device probing and so on,
and because the public dataplane functions are implemented as static inline,
rte_dmadevices has to be placed in a public file too.

PS: this pattern is widely used in eth/regexdev...

>> +
>> +extern struct rte_dmadev rte_dmadevices[];
>> +
>> +#endif /* _RTE_DMADEV_CORE_H_ */
>> diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
> 
> Let's remove rte_ prefix from DPDK internal headers.

as explained above, it's a public header file.

>> +
>> +#define RTE_DMADEV_LOG(level, fmt, args...) \
> 
> Do we need RTE_ prefix for internal API?
> 
>> +	rte_log(RTE_LOG_ ## level, libdmadev_logtype, "%s(): " fmt "\n", \
>> +		__func__, ##args)
>> +
>> +/* Macros to check for valid device */
>> +#define RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
>> +	if (!rte_dmadev_pmd_is_valid_dev((dev_id))) { \
>> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%d", dev_id); \
>> +		return retval; \
>> +	} \
>> +} while (0)
>> +
>> +#define RTE_DMADEV_VALID_DEVID_OR_RET(dev_id) do { \
>> +	if (!rte_dmadev_pmd_is_valid_dev((dev_id))) { \
>> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%d", dev_id); \
>> +		return; \
>> +	} \
>> +} while (0)
>> +
>> +#define RTE_DMADEV_DETACHED  0
>> +#define RTE_DMADEV_ATTACHED  1
> 
> Do we really need RTE_ prefix for interlal defines?

The RTE_ prefix reduces namespace conflicts.

It's the same as in lib/ethdev or regexdev...

>> +typedef int (*dmadev_xstats_reset_t)(struct rte_dmadev *dev,
>> +				     const uint32_t ids[], uint32_t nb_ids);
>> +/**< @internal Function used to reset extended stats. */
> 
> Do we really need both stats and xstats from the very
> beginning? I think it is better to start from just
> generic stats and add xstats when it is really required.

OK, but I think we should add a dump ops, which could be useful for
locating problems.

> .
> 


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-04 15:21 ` Matan Azrad
@ 2021-07-06  6:25   ` fengchengwen
  2021-07-06  6:50     ` Matan Azrad
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-06  6:25 UTC (permalink / raw)
  To: Matan Azrad, NBU-Contact-Thomas Monjalon, ferruh.yigit,
	bruce.richardson, jerinj, jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

On 2021/7/4 23:21, Matan Azrad wrote:
> 
> 
> From: Chengwen Feng
>> This patch introduces 'dmadevice' which is a generic type of DMA
>> device.
>>
>> The APIs of dmadev library exposes some generic operations which can
>> enable configuration and I/O with the DMA devices.
>>
> Did you consider RTE_COMP_ALGO_NULL xform in compressdev library?
> 

em, I just looked at the code.

RTE_COMP_ALGO_NULL is a small feature of the compression device,
and currently only mlx5 and isal support it.

Also, the compressdev dataplane API is relatively complicated for doing
just a DMA copy.

So I think we need a separate driver framework for the DMA device.

thanks

[snip]

> 
> 
> .
> 


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-06  6:25   ` fengchengwen
@ 2021-07-06  6:50     ` Matan Azrad
  2021-07-06  9:08       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Matan Azrad @ 2021-07-06  6:50 UTC (permalink / raw)
  To: fengchengwen, NBU-Contact-Thomas Monjalon, ferruh.yigit,
	bruce.richardson, jerinj, jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

Hi

From: fengchengwen
> On 2021/7/4 23:21, Matan Azrad wrote:
> >
> >
> > From: Chengwen Feng
> >> This patch introduces 'dmadevice' which is a generic type of DMA
> >> device.
> >>
> >> The APIs of dmadev library exposes some generic operations which can
> >> enable configuration and I/O with the DMA devices.
> >>
> > Did you consider RTE_COMP_ALGO_NULL xform in compressdev library?
> >
> 
> em, I just looked at the code.
> 
> The RTE_COMP_ALGO_NULL is a small feature of the compression device.
> and currently only mlx5 and isal support it.

Yes, but what does that mean?
If more drivers support DMA operations they can add the support there, no?


> Also the compressdev dataplane API relatively complicated to do just DMA
> copy.

You snipped more comments I wrote below 😊
Maybe it is related....

> So I think we need a separate driver framework for the DMA device.

Need to consider deprecation in compressdev if so....

> thanks
> 
> [snip]
> 
> >
> >
> > .
> >


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-05 10:52   ` Bruce Richardson
  2021-07-05 11:12     ` Morten Brørup
  2021-07-05 15:55     ` Jerin Jacob
@ 2021-07-06  8:20     ` fengchengwen
  2021-07-06  9:27       ` Bruce Richardson
  2 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-06  8:20 UTC (permalink / raw)
  To: Bruce Richardson, Jerin Jacob
  Cc: Thomas Monjalon, Ferruh Yigit, Jerin Jacob, dpdk-dev,
	Morten Brørup, Nipun Gupta, Hemant Agrawal, Maxime Coquelin,
	Honnappa Nagarahalli, David Marchand, Satananda Burla,
	Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On 2021/7/5 18:52, Bruce Richardson wrote:
> On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
>> On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:

[snip]

>>> + *
>>> + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's an error
>>> + * code.
>>> + * When using cookies, comply with the following rules:
>>> + * a) Cookies for each virtual queue are independent.
>>> + * b) For a virt queue, the cookie is monotonically incremented; when it reaches
>>> + *    INT_MAX, it wraps back to zero.
> 
> I disagree with the INT_MAX (or INT32_MAX) value here. If we use that
> value, it means that we cannot use implicit wrap-around inside the CPU and
> have to check for the INT_MAX value. Better to:
> 1. Specify that it wraps at UINT16_MAX which allows us to just use a
> uint16_t internally and wrap-around automatically, or:
> 2. Specify that it wraps at a power-of-2 value >= UINT16_MAX, giving
> drivers the flexibility at what value to wrap around.

+1 for option 1
BTW: option 2 seems a little complicated for drivers and applications.
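
For illustration, option 1 lets a driver keep a plain uint16_t index and
rely on C's unsigned wrap-around (a sketch; vq->ring_idx is a hypothetical
driver field):

	/* inside a hypothetical driver enqueue path */
	uint16_t idx = vq->ring_idx++; /* wraps UINT16_MAX -> 0 automatically */
	return idx; /* no explicit wrap check needed */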

>> When the system is configured as IOVA as VA
>> 1) Device supports any VA address like memory from rte_malloc(),
>> rte_memzone(), malloc, stack memory
>> 2) Device support only VA address from rte_malloc(), rte_memzone() i.e
>> memory backed by hugepage and added to DMA map.
>>
>> When the system is configured as IOVA as PA
>> 1) Devices support only PA addresses .
>>
>> IMO, Above needs to be  advertised as capability and application needs
>> to align with that
>> and I dont think application requests the driver to work in any of the modes.
>>
>>
> 
> I don't think we need this level of detail for addressing capabilities.
> Unless I'm missing something, the hardware should behave exactly as other
> hardware does taking in iova's.  If the user wants to check whether virtual
> addresses to pinned memory can be used directly, the user can call
> "rte_eal_iova_mode". We can't have a situation where some hardware uses one
> type of addresses and another hardware the other.
> 
> Therefore, the only additional addressing capability we should need to
> report is that the hardware can use SVM/SVA and use virtual addresses not
> in hugepage memory.
> 

I discussed the addressing capability in a previous thread.
Indeed, we can reduce it to just one capability.

>>> + * @warning
>>> + * @b EXPERIMENTAL: this API may change without prior notice.
>>> + *
>>> + * Enqueue a fill operation onto the DMA virt queue
>>> + *
>>> + * This queues up a fill operation to be performed by hardware, but does not
>>> + * trigger hardware to begin that operation.
>>> + *
>>> + * @param dev_id
>>> + *   The identifier of the device.
>>> + * @param vq_id
>>> + *   The identifier of virt queue.
>>> + * @param pattern
>>> + *   The pattern to populate the destination buffer with.
>>> + * @param dst
>>> + *   The address of the destination buffer.
>>> + * @param length
>>> + *   The length of the destination buffer.
>>> + * @param flags
>>> + *   An opaque flags for this operation.
>>
>> PLEASE REMOVE opaque stuff from fastpath it will be a pain for
>> application writers as
>> they need to write multiple combinations of fastpath. flags are OK, if
>> we have a valid
>> generic flag now to control the transfer behavior.
>>
> 
> +1. Flags need to be explicitly listed. If we don't have any flags for now,
> we can specify that the value must be given as zero and it's for future
> use.
> 

+1, I will delete the flags parameters.

Currently we have fence, which is implemented as an ops; if we later need
more flags, we would have to create a new ops each time, which is not a
good way to expand.

So I think we should change the fence ops into an extra_flags ops:
	rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
to
	rte_dmadev_extra_flags(uint16_t dev_id, uint16_t vq_id, uint64_t flags);

So we could add a fence by: rte_dmadev_extra_flags(dev_id, vq_id, RTE_DMA_FLAG_FENCE);
	
>>> +/**
>>> + * @warning
>>> + * @b EXPERIMENTAL: this API may change without prior notice.
>>> + *
>>> + * Returns the number of operations that failed to complete.
>>> + * NOTE: This API is used after rte_dmadev_completed sets has_error.
>>> + *
>>> + * @param dev_id
>>> + *   The identifier of the device.
>>> + * @param vq_id
>>> + *   The identifier of virt queue.
>>> + * @param nb_status
>>> + *   Indicates the size of the status array.
>>> + * @param[out] status
>>> + *   The error code of operations that failed to complete.
>>> + * @param[out] cookie
>>> + *   The last failed completed operation's cookie.
>>> + *
>>> + * @return
>>> + *   The number of operations that failed to complete.
>>> + *
>>> + * NOTE: The caller must ensure that the input parameter is valid and the
>>> + *       corresponding device supports the operation.
>>> + */
>>> +__rte_experimental
>>> +static inline uint16_t
>>> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
>>> +                          const uint16_t nb_status, uint32_t *status,
>>> +                          dma_cookie_t *cookie)
>>
>> IMO, it is better to move cookie/ring_idx to position 3.
>> Why would it return an array of errors? Since it is called after
>> rte_dmadev_completed() reports has_error, is it better to change it to
>>
>> rte_dmadev_error_status(uint16_t dev_id, uint16_t vq_id, dma_cookie_t
>> *cookie, uint32_t *status)
>>
>> I also think we may need to set status as a bitmask, enumerate all
>> the combinations of error codes of all the drivers, and return a string
>> from the driver like the existing rte_flow_error
>>
>> See
>> struct rte_flow_error {
>>         enum rte_flow_error_type type; /**< Cause field and error types. */
>>         const void *cause; /**< Object responsible for the error. */
>>         const char *message; /**< Human-readable error message. */
>> };
>>
> 
> I think we need a multi-return value API here, as we may add operations in
> future which have non-error status values to return. The obvious case is
> DMA engines which support "compare" operations. In that case a successful

Just curious, what is the 'compare' operation's application scenario?

> compare (as in there were no DMA or HW errors) can return "equal" or
> "not-equal" as statuses. For general "copy" operations, the faster
> completion op can be used to just return successful values (and only call
> this status version on error), while apps using those compare ops or a
> mixture of copy and compare ops, would always use the slower one that
> returns status values for each and every op..

In the current design, rte_dmadev_completed_fails applies only to failure
scenarios. Do you mean that in 'compare' operations the status is always
non-zero, whether or not the two buffers match?

> 
> The ioat APIs used 32-bit integer values for this status array so as to
> allow e.g. 16-bits for error code and 16-bits for future status values. For
> most operations there should be a fairly small set of things that can go
> wrong, i.e. bad source address, bad destination address or invalid length.
> Within that we may have a couple of specifics for why an address is bad,
> but even so I don't think we need to start having multiple bit
> combinations.
> 
>>> +{
>>> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>>> +       return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
>>> +}
>>> +
>>> +struct rte_dmadev_stats {
>>> +       uint64_t enqueue_fail_count;
>>> +       /**< Count of all operations which failed to be enqueued */
>>> +       uint64_t enqueued_count;
>>> +       /**< Count of all operations which were successfully enqueued */
>>> +       uint64_t completed_fail_count;
>>> +       /**< Count of all operations which failed to complete */
>>> +       uint64_t completed_count;
>>> +       /**< Count of all operations which completed successfully */
>>> +};
>>
>> We need to have capability API to tell which items are
>> updated/supported by the driver.
>>
> 
> I also would remove the enqueue fail counts, since they are better counted
> by the app. If a driver reports 20,000 failures we have no way of knowing
> if that is 20,000 unique operations which failed to enqueue or a single
> operation which failed to enqueue 20,000 times but succeeded on attempt
> 20,001.
> 

This does exist. The application may just emit a DEBUG trace rather than
record it, so I would recommend keeping the counter so we at least know
whether it happened after a long run.

> 


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-06  6:50     ` Matan Azrad
@ 2021-07-06  9:08       ` fengchengwen
  2021-07-06  9:17         ` Matan Azrad
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-06  9:08 UTC (permalink / raw)
  To: Matan Azrad, NBU-Contact-Thomas Monjalon, ferruh.yigit,
	bruce.richardson, jerinj, jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

On 2021/7/6 14:50, Matan Azrad wrote:
> Hi
> 
> From: fengchengwen
>> On 2021/7/4 23:21, Matan Azrad wrote:
>>>
>>>
>>> From: Chengwen Feng
>>>> This patch introduces 'dmadevice' which is a generic type of DMA
>>>> device.
>>>>
>>>> The APIs of dmadev library exposes some generic operations which can
>>>> enable configuration and I/O with the DMA devices.
>>>>
>>> Did you consider RTE_COMP_ALGO_NULL xform in compressdev library?
>>>
>>
>> em, I just looked at the code.
>>
>> The RTE_COMP_ALGO_NULL is a small feature of the compression device.
>> and currently only mlx5 and isal support it.
> 
> Yes, but what that is mean?
> If more drivers support DMA operations they can add the support there, no?
> 

You mean to extend compressdev directly?
I think it is hard to extend, and it may break the compressdev concept.

> 
>> Also the compressdev dataplane API relatively complicated to do just DMA
>> copy.
> 
> You snipped more comments I wrote below 😊
> Maybe it is related....

Sorry, I just skipped them.

'Did you consider also mbuf API usage for memory descriptor?'
---One scenario for the DMA is vhost-net, where the src or dst could be an
mbuf but the peer is not an mbuf, so here we use raw fields.

> 
>> So I think we need a separate driver framework for the DMA device.
> 
> Need to consider deprecation in compressdev if so....
> 
>> thanks
>>
>> [snip]
>>
>>>
>>>
>>> .
>>>
> 


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-06  9:08       ` fengchengwen
@ 2021-07-06  9:17         ` Matan Azrad
  0 siblings, 0 replies; 339+ messages in thread
From: Matan Azrad @ 2021-07-06  9:17 UTC (permalink / raw)
  To: fengchengwen, NBU-Contact-Thomas Monjalon, ferruh.yigit,
	bruce.richardson, jerinj, jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma



From: fengchengwen
> On 2021/7/6 14:50, Matan Azrad wrote:
> > Hi
> >
> > From: fengchengwen
> >> On 2021/7/4 23:21, Matan Azrad wrote:
> >>>
> >>>
> >>> From: Chengwen Feng
> >>>> This patch introduces 'dmadevice' which is a generic type of DMA
> >>>> device.
> >>>>
> >>>> The APIs of dmadev library exposes some generic operations which
> >>>> can enable configuration and I/O with the DMA devices.
> >>>>
> >>> Did you consider RTE_COMP_ALGO_NULL xform in compressdev library?
> >>>
> >>
> >> em, I just looked at the code.
> >>
> >> The RTE_COMP_ALGO_NULL is a small feature of the compression device.
> >> and currently only mlx5 and isal support it.
> >
> > Yes, but what that is mean?
> > If more drivers support DMA operations they can add the support there,
> no?
> >
> 
> You mean to extend compressdev directly?
> I think it is hard to extend, and it may break the compressdev concept.

Maybe, what do you need to extend?
Also, maybe your extension is related to compress as well; in the end both are mem-to-mem offloads.
 

> >
> >> Also the compressdev dataplane API relatively complicated to do just
> >> DMA copy.
> >
> > You snipped more comments I wrote below 😊
> > Maybe it is related....
> 
> Sorry, I just skipped them.
> 
> 'Did you consider also mbuf API usage for memory descriptor?'
> ---One scenario for the DMA is vhost-net, where the src or dst could be an mbuf but
> the peer is not an mbuf, so here we use raw fields.

Did you consider using external/attached mbufs for this case?

Did you also consider the raw API in the cryptodev library?
 
> >
> >> So I think we need a separate driver framework for the DMA device.
> >
> > Need to consider deprecation in compressdev if so....
> >
> >> thanks
> >>
> >> [snip]
> >>
> >>>
> >>>
> >>> .
> >>>
> >


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-06  8:20     ` fengchengwen
@ 2021-07-06  9:27       ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06  9:27 UTC (permalink / raw)
  To: fengchengwen
  Cc: Jerin Jacob, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Tue, Jul 06, 2021 at 04:20:38PM +0800, fengchengwen wrote:
> On 2021/7/5 18:52, Bruce Richardson wrote:
> > On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> >> On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> 
> [snip]
> 
> >>> + *
> >>> + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's an error
> >>> + * code.
> >>> + * When using cookies, comply with the following rules:
> >>> + * a) Cookies for each virtual queue are independent.
> >>> + * b) For a virt queue, the cookie is monotonically incremented; when it reaches
> >>> + *    INT_MAX, it wraps back to zero.
> > 
> > I disagree with the INT_MAX (or INT32_MAX) value here. If we use that
> > value, it means that we cannot use implicit wrap-around inside the CPU and
> > have to check for the INT_MAX value. Better to:
> > 1. Specify that it wraps at UINT16_MAX which allows us to just use a
> > uint16_t internally and wrap-around automatically, or:
> > 2. Specify that it wraps at a power-of-2 value >= UINT16_MAX, giving
> > drivers the flexibility at what value to wrap around.
> 
> +1 for option 1
> BTW: option 2 seems a little complicated for drivers and applications.
> 

I would tend to agree. I just included it in case you explicitly wanted
more than UINT16_MAX values in your driver.

> >> When the system is configured as IOVA as VA
> >> 1) Device supports any VA address like memory from rte_malloc(),
> >> rte_memzone(), malloc, stack memory
> >> 2) Device support only VA address from rte_malloc(), rte_memzone() i.e
> >> memory backed by hugepage and added to DMA map.
> >>
> >> When the system is configured as IOVA as PA
> >> 1) Devices support only PA addresses .
> >>
> >> IMO, Above needs to be  advertised as capability and application needs
> >> to align with that
> >> and I dont think application requests the driver to work in any of the modes.
> >>
> >>
> > 
> > I don't think we need this level of detail for addressing capabilities.
> > Unless I'm missing something, the hardware should behave exactly as other
> > hardware does taking in iova's.  If the user wants to check whether virtual
> > addresses to pinned memory can be used directly, the user can call
> > "rte_eal_iova_mode". We can't have a situation where some hardware uses one
> > type of addresses and another hardware the other.
> > 
> > Therefore, the only additional addressing capability we should need to
> > report is that the hardware can use SVM/SVA and use virtual addresses not
> > in hugepage memory.
> > 
> 
> I discussed the addressing capability in a previous thread.
> Indeed, we can reduce it to just one capability.
> 
> >>> + * @warning
> >>> + * @b EXPERIMENTAL: this API may change without prior notice.
> >>> + *
> >>> + * Enqueue a fill operation onto the DMA virt queue
> >>> + *
> >>> + * This queues up a fill operation to be performed by hardware, but does not
> >>> + * trigger hardware to begin that operation.
> >>> + *
> >>> + * @param dev_id
> >>> + *   The identifier of the device.
> >>> + * @param vq_id
> >>> + *   The identifier of virt queue.
> >>> + * @param pattern
> >>> + *   The pattern to populate the destination buffer with.
> >>> + * @param dst
> >>> + *   The address of the destination buffer.
> >>> + * @param length
> >>> + *   The length of the destination buffer.
> >>> + * @param flags
> >>> + *   An opaque flags for this operation.
> >>
> >> PLEASE REMOVE opaque stuff from fastpath it will be a pain for
> >> application writers as
> >> they need to write multiple combinations of fastpath. flags are OK, if
> >> we have a valid
> >> generic flag now to control the transfer behavior.
> >>
> > 
> > +1. Flags need to be explicitly listed. If we don't have any flags for now,
> > we can specify that the value must be given as zero and it's for future
> > use.
> > 
> 
> +1, I will delete the flags parameters.
> 
> Currently we have fence, which is implemented as an ops; if we later need
> more flags, we would have to create a new ops each time, which is not a
> good way to expand.
> 
> So I think we should change the fence ops into an extra_flags ops:
> 	rte_dmadev_fence(uint16_t dev_id, uint16_t vq_id)
> to
> 	rte_dmadev_extra_flags(uint16_t dev_id, uint16_t vq_id, uint64_t flags);
> 
> So we could add a fence by: rte_dmadev_extra_flags(dev_id, vq_id, RTE_DMA_FLAG_FENCE);
> 

I don't think this is the way to go. I think we will need the flags
parameter per op in the future, so we should keep it, even if it is always
zero for now. It gives us future expandability options.
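
For example (a sketch; the flag name here is hypothetical), fence could
later be expressed as a per-op flag without adding any new functions:

	/* ordinary copy */
	rte_dmadev_copy(dev_id, vq_id, src, dst, len, 0);
	/* copy that must not pass earlier ops on this virt queue */
	rte_dmadev_copy(dev_id, vq_id, src, dst, len, RTE_DMA_OP_FLAG_FENCE);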

> >>> +/**
> >>> + * @warning
> >>> + * @b EXPERIMENTAL: this API may change without prior notice.
> >>> + *
> >>> + * Returns the number of operations that failed to complete.
> >>> + * NOTE: This API is used after rte_dmadev_completed sets has_error.
> >>> + *
> >>> + * @param dev_id
> >>> + *   The identifier of the device.
> >>> + * @param vq_id
> >>> + *   The identifier of virt queue.
> >>> + * @param nb_status
> >>> + *   Indicates the size of the status array.
> >>> + * @param[out] status
> >>> + *   The error code of operations that failed to complete.
> >>> + * @param[out] cookie
> >>> + *   The last failed completed operation's cookie.
> >>> + *
> >>> + * @return
> >>> + *   The number of operations that failed to complete.
> >>> + *
> >>> + * NOTE: The caller must ensure that the input parameter is valid and the
> >>> + *       corresponding device supports the operation.
> >>> + */
> >>> +__rte_experimental
> >>> +static inline uint16_t
> >>> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> >>> +                          const uint16_t nb_status, uint32_t *status,
> >>> +                          dma_cookie_t *cookie)
> >>
> >> IMO, it is better to move cookie/ring_idx to position 3.
> >> Why would it return an array of errors? Since it is called after
> >> rte_dmadev_completed() reports has_error, is it better to change it to
> >>
> >> rte_dmadev_error_status(uint16_t dev_id, uint16_t vq_id, dma_cookie_t
> >> *cookie, uint32_t *status)
> >>
> >> I also think we may need to set status as a bitmask, enumerate all
> >> the combinations of error codes of all the drivers, and return a string
> >> from the driver like the existing rte_flow_error
> >>
> >> See
> >> struct rte_flow_error {
> >>         enum rte_flow_error_type type; /**< Cause field and error types. */
> >>         const void *cause; /**< Object responsible for the error. */
> >>         const char *message; /**< Human-readable error message. */
> >> };
> >>
> > 
> > I think we need a multi-return value API here, as we may add operations in
> > future which have non-error status values to return. The obvious case is
> > DMA engines which support "compare" operations. In that case a successful
> 
> Just curious, what is the 'compare' operation's application scenario?
> 

We are not looking to use this capability just now - but it's a capability
in our hardware that offers some interesting possibilities, so I'd like to
ensure it's possible to integrate in future. To do so, we just need to
ensure that the function which returns the "error" status - or status
generally - can be used to return bursts of statuses, even if it's slower
compared to the regular completion return which just assumes all succeed.

> > compare (as in there were no DMA or HW errors) can return "equal" or
> > "not-equal" as statuses. For general "copy" operations, the faster
> > completion op can be used to just return successful values (and only call
> > this status version on error), while apps using those compare ops or a
> > mixture of copy and compare ops, would always use the slower one that
> > returns status values for each and every op..
> 
> In the current design, rte_dmadev_completed_fails applies only to failure
> scenarios. Do you mean that in 'compare' operations the status is always
> non-zero, whether or not the two buffers match?
> 

Yes and no. There are two separate "status" values to be returned for such
operations - the actual HW status i.e. all parameters valid, and the actual
memcmp result of equal/non-equal. In our completion records these are
called "status" and "result" respectively. "Result" is only valid if
"status" is successful, and is not relevant for copy or fill or similar
ops. Therefore, to support this, we just need some bits in "status"
reserved for that result case, and the completed_status op to return an
array of status values, not just a single one.
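
As a sketch of what that could look like (the exact bit layout here is an
assumption for illustration, not something defined by the RFC):

	#define DMA_STATUS_ERR_MASK    0x0000FFFFu /* HW/param errors; 0 = OK */
	#define DMA_STATUS_RESULT_MASK 0xFFFF0000u /* op result, e.g. compare */

	/* st: one entry from the status array returned by the completion call */
	uint32_t st = status[i];
	if ((st & DMA_STATUS_ERR_MASK) == 0) {
		/* op succeeded; for a compare op, inspect the result bits,
		 * assuming 0 there means "equal" */
		bool equal = (st & DMA_STATUS_RESULT_MASK) == 0;
	}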

> > 
> > The ioat APIs used 32-bit integer values for this status array so as to
> > allow e.g. 16-bits for error code and 16-bits for future status values. For
> > most operations there should be a fairly small set of things that can go
> > wrong, i.e. bad source address, bad destination address or invalid length.
> > Within that we may have a couple of specifics for why an address is bad,
> > but even so I don't think we need to start having multiple bit
> > combinations.
> > 
> >>> +{
> >>> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> >>> +       return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
> >>> +}
> >>> +
> >>> +struct rte_dmadev_stats {
> >>> +       uint64_t enqueue_fail_count;
> >>> +       /**< Count of all operations which failed to be enqueued */
> >>> +       uint64_t enqueued_count;
> >>> +       /**< Count of all operations which were successfully enqueued */
> >>> +       uint64_t completed_fail_count;
> >>> +       /**< Count of all operations which failed to complete */
> >>> +       uint64_t completed_count;
> >>> +       /**< Count of all operations which completed successfully */
> >>> +};
> >>
> >> We need to have capability API to tell which items are
> >> updated/supported by the driver.
> >>
> > 
> > I also would remove the enqueue fail counts, since they are better counted
> > by the app. If a driver reports 20,000 failures we have no way of knowing
> > if that is 20,000 unique operations which failed to enqueue or a single
> > operation which failed to enqueue 20,000 times but succeeded on attempt
> > 20,001.
> > 
> 
> This does exist. The application may just emit a DEBUG trace rather than
> record it, so I would recommend keeping the counter so we at least know
> whether it happened after a long run.
> 
I disagree here - the enqueue failure should only be tracked by the app,
because:
1. only app knows whether a particular enqueue failure is retry or not and
   how it should be counted
2. these failures cannot be counted by hardware and must be counted by
   software, so adding additional operations to our enqueue path. In the
   retry case, that could be a lot of load-update-stores that will have to
   be done in the driver, while if tracked in the app, the count would
   just be a register increment.

Operation failures can be tracked in driver stats, though, as that is
related to hardware operation.
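
To illustrate the app-side counting (a sketch; src, dst and len are the
operation being submitted, and whether to retry or drop is entirely the
application's decision):

	uint64_t enq_fail = 0; /* app-owned counter: a single increment */

	while (rte_dmadev_copy(dev_id, vq_id, src, dst, len, 0) < 0) {
		enq_fail++; /* app decides how a retry loop is counted */
		/* submit what is already queued so the ring can drain */
		rte_dmadev_perform(dev_id, vq_id);
	}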

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-06  3:01   ` fengchengwen
@ 2021-07-06 10:01     ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 10:01 UTC (permalink / raw)
  To: fengchengwen
  Cc: Jerin Jacob, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Tue, Jul 06, 2021 at 11:01:17AM +0800, fengchengwen wrote:
> Many thanks, mostly OK, and a few comments inline
> 
> On 2021/7/4 17:30, Jerin Jacob wrote:
> > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> >>
> >> This patch introduces 'dmadevice' which is a generic type of DMA
> >> device.
> ...
> >> +#include <rte_compat.h>
> > 
> > Sort in alphabetical order.
> > 
> >> +
> >> +/**
> >> + * dma_cookie_t - an opaque DMA cookie
> > 
> > Since we are defining the behaviour is not opaque any more.
> > I think, it is better to call ring_idx or so.
> > 
> 
> 
> This type is designed to have two meanings: return <0 on failure and return >=0 on success.
> 
> How about the following definition:
>     typedef int dma_ring_index_t;
> 
> if >= 0, its value range is [0, 65535] = uint16_t, so the driver implementation will be simple.
> if < 0, it means enqueue failure.
> 
> For the driver, it could hold a uint16_t ring_index: if the enqueue fails just return failure,
> else return the current ring_index and update it by ring_index++;
> 

Well, yes and no on the "two meanings". For the enqueue function, yes, the
return value can have two meanings, but I don't consider them one type. On
the completion call, however, the values can only be positive and <
UINT16_MAX, so having two meanings is actually confusing. Better to have

* enqueue return regular int, with doxygen comment 
	"@return 
	  Negative on error, otherwise job index between 0 and UINT16_MAX"
* for completions, take a uint16_t* parameter for the last completed index
  since no negative values are needed.

Beyond this, we generally don't use typedefs in DPDK for basic types (with
a few exceptions e.g. rte_iova_t), and save their use only for function
pointers.
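
Putting that together, the prototypes would look something like this
(a sketch of the suggested shape, not final names):

	/* enqueue: negative on error, else job index in [0, UINT16_MAX] */
	int rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, void *src,
			void *dst, uint32_t length, uint64_t flags);

	/* completion: returns count completed, last index via out-param */
	uint16_t rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id,
			uint16_t nb_cpls, uint16_t *last_idx, bool *has_error);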

> >> +
> >> +/**
> >> + * A structure used to retrieve the contextual information of
> >> + * an DMA device
> >> + */
> >> +struct rte_dmadev_info {
> >> +       /**
> >> +        * Fields filled by framewok
> > 
> > typo.
> > 
> >> +        */
> >> +       struct rte_device *device; /**< Generic Device information */
> >> +       const char *driver_name; /**< Device driver name */
> >> +       int socket_id; /**< Socket ID where memory is allocated */
> >> +
> >> +       /**
> >> +        * Specification fields filled by driver
> >> +        */
> >> +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> >> +       uint16_t max_hw_queues; /**< Maximum number of HW queues. */
> >> +       uint16_t max_vqs_per_hw_queue;
> >> +       /**< Maximum number of virt queues to allocate per HW queue */
> >> +       uint16_t max_desc;
> >> +       /**< Maximum allowed number of virt queue descriptors */
> >> +       uint16_t min_desc;
> >> +       /**< Minimum allowed number of virt queue descriptors */
> > 
> > Please add max_nb_segs. i.e maximum number of segments supported.
> 
> Do you mean something like "burst_size"?
> 
> > 
> >> +
> >> +       /**
> >> +        * Status fields filled by driver
> >> +        */
> >> +       uint16_t nb_hw_queues; /**< Number of HW queues configured */
> >> +       uint16_t nb_vqs; /**< Number of virt queues configured */
> >> +};
> >> +
> >> +/**
> >> + * dma_address_type
> >> + */
> >> +enum dma_address_type {
> >> +       DMA_ADDRESS_TYPE_IOVA, /**< Use IOVA as dma address */
> >> +       DMA_ADDRESS_TYPE_VA, /**< Use VA as dma address */
> >> +};
> >> +
> >> +/**
> >> + * A structure used to configure a DMA device.
> >> + */
> >> +struct rte_dmadev_conf {
> >> +       enum dma_address_type addr_type; /**< Address type to used */
> > 
> > I think, there are 3 kinds of limitations/capabilities.
> > 
> > When the system is configured as IOVA as VA
> > 1) Device supports any VA address like memory from rte_malloc(),
> > rte_memzone(), malloc, stack memory
> > 2) Device support only VA address from rte_malloc(), rte_memzone() i.e
> > memory backed by hugepage and added to DMA map.
> > 
> > When the system is configured as IOVA as PA
> > 1) Devices support only PA addresses .
> > 
> > IMO, Above needs to be  advertised as capability and application needs
> > to align with that
> > and I dont think application requests the driver to work in any of the modes.
> > 
> 
> OK, Let's put together our ideas on address type:
> 
> There are three modes, which we may define as:
> 	IOVA_as_VA-ALL     ---for devices which may need to support the SVA feature
>                            ---may also be a CPU memcpy 'device'
> 	IOVA_as_VA         ---for devices which need IOMMU support
> 	IOVA_as_PA
> 
> There are many combinations of modes which a device may support: e.g. some devices
> may only support IOVA_as_PA, some may only support IOVA_as_VA, and some support
> IOVA_as_PA and IOVA_as_VA. The specific runtime type is determined by vfio
> and the driver capability (e.g. RTE_PCI_DRV_NEED_IOVA_AS_VA).
> 
> So we already define two capabilities for this:
> 	#define RTE_DMA_DEV_CAPA_IOVA	(1ull << 8) /**< Support IOVA as DMA address */
> 					---this cover IOVA_as_VA and IOVA_as_PA
> 	#define RTE_DMA_DEV_CAPA_VA	(1ull << 9) /**< Support VA as DMA address */
> 					---this cover IOVA_as_VA-ALL
> for a device which doesn't support SVA:
> 	only declare RTE_DMA_DEV_CAPA_IOVA
> for a device which supports SVA:
> 	declare RTE_DMA_DEV_CAPA_IOVA
> 	declare RTE_DMA_DEV_CAPA_VA (only when the IOMMU is enabled and the 'SVA flag' was set)
> for a CPU memcpy device:
> 	only declare RTE_DMA_DEV_CAPA_VA
> 
> As the application:
> - if RTE_DMA_DEV_CAPA_VA is supported, it can pass any VA address to the DMA,
> - else if RTE_DMA_DEV_CAPA_IOVA is supported, it should pass an IOVA address to the DMA,
> - else the DMA device should not exist.
> 

I still don't think we need all of this. DPDK already has support through
the existing bus infrastructure for determining if DPDK needs to use
physical or virtual addresses, so we should not be duplicating that as
devices *cannot* use a different addressing mode to DPDK itself.
Given that, the only flag we need is one to indicate SVA support.
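
Under that model the application-side check reduces to something like the
following (a sketch; RTE_DMA_DEV_CAPA_SVA is a hypothetical name for the
SVA flag):

	struct rte_dmadev_info info;
	rte_dmadev_info_get(dev_id, &info);

	if (rte_eal_iova_mode() == RTE_IOVA_VA ||
			(info.dev_capa & RTE_DMA_DEV_CAPA_SVA)) {
		/* any virtual address can be passed to the device directly */
	} else {
		/* translate first, e.g. via rte_malloc_virt2iova() */
	}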

> > 
<snip>
> > 
> > I also think, we may need to set status as bitmask and enumerate all
> > the combination of error codes
> > of all the driver and return string from driver existing rte_flow_error
> > 
> 
> A bitmask is limited to at most 32 values (or 64 if we extend it), and
> rte_flow_error is heavyweight.
> 
> Considering that errors occur in only a small number of scenarios, it's OK
> to pass a status array; since each status is 32 bits it can denote a very
> large number of error codes.
> 

+1 to this.

> >> +
> >> +struct rte_dmadev_stats {
> >> +       uint64_t enqueue_fail_count;
> >> +       /**< Count of all operations which failed to be enqueued */
> >> +       uint64_t enqueued_count;
> >> +       /**< Count of all operations which were successfully enqueued */
> >> +       uint64_t completed_fail_count;
> >> +       /**< Count of all operations which failed to complete */
> >> +       uint64_t completed_count;
> >> +       /**< Count of all operations which completed successfully */
> >> +};
> > 
> > We need to have capability API to tell which items are
> > updated/supported by the driver.
> > 
> 
> There are only a few fields, and I don't think it's necessary to add a
> capability API: drivers that don't support stats can simply not implement
> the callback, and for those that do, these fields are the minimum set.
> 
> > 
> >> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> >> new file mode 100644
> >> index 0000000..a3afea2
> >> --- /dev/null
> >> +++ b/lib/dmadev/rte_dmadev_core.h
> >> @@ -0,0 +1,98 @@
> >> +/* SPDX-License-Identifier: BSD-3-Clause
> >> + * Copyright 2021 HiSilicon Limited.
> >> + */
> >> +
> >> +#ifndef _RTE_DMADEV_CORE_H_
> >> +#define _RTE_DMADEV_CORE_H_
> >> +
> >> +/**
> >> + * @file
> >> + *
> >> + * RTE DMA Device internal header.
> >> + *
> >> + * This header contains internal data types. But they are still part of the
> >> + * public API because they are used by inline public functions.
> >> + */
> >> +
> >> +struct rte_dmadev;
> >> +
> >> +typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
> >> +                                     void *src, void *dst,
> >> +                                     uint32_t length, uint64_t flags);
> >> +/**< @internal Function used to enqueue a copy operation. */
> > 
> > To avoid namespace conflict(as it is public API) use rte_
> 
> These are internal functions used by drivers, not applications,
> and the eth/regexdev _core headers are also defined without the rte_ prefix.
> 
> So I think it should remain as it is.
> 

Even if only used by a driver, APIs are exported from the .so built for
the library, which means that they become public for apps using the lib.
Even for header-only symbols for drivers, it's good practice to put the
prefix since they are for use outside the compilation unit.

> > 
> > 
> >> +
> >> +/**
> >> + * The data structure associated with each DMA device.
> >> + */
> >> +struct rte_dmadev {
> >> +       /**< Enqueue a copy operation onto the DMA device. */
> >> +       dmadev_copy_t copy;
> >> +       /**< Enqueue a scatter list copy operation onto the DMA device. */
> >> +       dmadev_copy_sg_t copy_sg;
> >> +       /**< Enqueue a fill operation onto the DMA device. */
> >> +       dmadev_fill_t fill;
> >> +       /**< Enqueue a scatter list fill operation onto the DMA device. */
> >> +       dmadev_fill_sg_t fill_sg;
> >> +       /**< Add a fence to force ordering between operations. */
> >> +       dmadev_fence_t fence;
> >> +       /**< Trigger hardware to begin performing enqueued operations. */
> >> +       dmadev_perform_t perform;
> >> +       /**< Returns the number of operations that successful completed. */
> >> +       dmadev_completed_t completed;
> >> +       /**< Returns the number of operations that failed to complete. */
> >> +       dmadev_completed_fails_t completed_fails;
> > 
> > We need to limit fastpath items in 1 CL
> 
> yes, currently there are 8 callbacks, which exactly fill one cache line.
> 

Before we get overly concerned about this, I think we should benchmark it
to see how much our "one cacheline" is giving us compared to having them in
ops. For example, the "perform" doorbell function, or the completed
function is only called once every burst, so it would be interesting to see
how much difference it really makes for that.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-06  3:56   ` fengchengwen
@ 2021-07-06 10:02     ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 10:02 UTC (permalink / raw)
  To: fengchengwen
  Cc: Andrew Rybchenko, thomas, ferruh.yigit, jerinj, jerinjacobk, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

On Tue, Jul 06, 2021 at 11:56:03AM +0800, fengchengwen wrote:
> Many thanks, mostly OK, a few comments inline
> 
> On 2021/7/4 22:57, Andrew Rybchenko wrote:
> > On 7/2/21 4:18 PM, Chengwen Feng wrote:
> >> This patch introduces 'dmadevice' which is a generic type of DMA
> >> device.
<snip>
> > Do we really need both stats and xstats from the very
> > beginning? I think it is better to start from just
> > generic stats and add xstats when it is really required.
> 
> OK, but I think we should add a dump ops, which could be useful for
> locating problems.
> 
+1 to both suggestions - dropping xstats (for now) and adding dump fn.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (3 preceding siblings ...)
  2021-07-04 15:21 ` Matan Azrad
@ 2021-07-06 20:28 ` Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports Bruce Richardson
                     ` (9 more replies)
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
                   ` (24 subsequent siblings)
  29 siblings, 10 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

This patchset contains a series of changes to dmadev based on work being done to
port over our drivers to test this new infrastructure. Some of these are bug
fixes to enable compilation e.g. missing exports or meson.build files, while
others are suggested changes to enhance the API. All these patches are to be
applied on top of [1] as they are mostly suggested changes to that RFC i.e.
patches to the patch!

The final patch includes some basic sanity tests for copy operations that we
have ported over from the ioat self-tests to use the dmadev APIs. The basic
dataplane part of those tests is probably ok for now, but the initialization of
queues in that test code may need some enhancement. Feedback welcome.

A tree with all these patches applied can be got at [2] if anyone wants to use
that as a basis for working on drivers, or for other discussion.

[1] http://patches.dpdk.org/project/dpdk/patch/1625231891-2963-1-git-send-email-fengchengwen@huawei.com/
[2] https://github.com/bruce-richardson/dpdk/tree/dmadev-rfcs

Bruce Richardson (9):
  dmadev: add missing exports
  dmadev: change virtual addresses to IOVA
  dmadev: add dump function
  dmadev: remove xstats functions
  dmadev: drop cookie typedef
  dmadev: allow NULL parameters to completed ops call
  dmadev: stats structure updates
  drivers: add dma driver category
  app/test: add basic dmadev unit test

 app/test/meson.build         |   2 +
 app/test/test_dmadev.c       | 320 +++++++++++++++++++++++++++++++++++
 drivers/dma/meson.build      |  11 ++
 drivers/meson.build          |   1 +
 lib/dmadev/rte_dmadev.c      |  66 ++------
 lib/dmadev/rte_dmadev.h      | 204 +++++++---------------
 lib/dmadev/rte_dmadev_core.h |  16 +-
 lib/dmadev/rte_dmadev_pmd.h  |  24 +--
 lib/dmadev/version.map       |   7 +-
 9 files changed, 425 insertions(+), 226 deletions(-)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 drivers/dma/meson.build

--
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-07  8:26     ` David Marchand
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 2/9] dmadev: change virtual addresses to IOVA Bruce Richardson
                     ` (8 subsequent siblings)
  9 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

Export the rte_dmadevices array and the allocate and release functions
which are needed by PMDs.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/meson.build     | 1 +
 lib/dmadev/rte_dmadev.c | 2 ++
 lib/dmadev/version.map  | 3 +++
 3 files changed, 6 insertions(+)

diff --git a/drivers/meson.build b/drivers/meson.build
index bc6f4f567..f09a9172c 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -9,6 +9,7 @@ subdirs = [
         'common/mlx5',    # depends on bus.
         'common/qat',     # depends on bus.
         'common/sfc_efx', # depends on bus.
+        'dma',            # depends on bus.
         'mempool',        # depends on common and bus.
         'net',            # depends on common, bus, mempool
         'raw',            # depends on common, bus and net.
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index a94e83984..855f4d272 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -372,6 +372,7 @@ rte_dmadev_find_free_device_index(void)
 	return RTE_DMADEV_MAX_DEVS;
 }
 
+__rte_experimental
 struct rte_dmadev *
 rte_dmadev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
 {
@@ -414,6 +415,7 @@ rte_dmadev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
 	return dev;
 }
 
+__rte_experimental
 int
 rte_dmadev_pmd_release(struct rte_dmadev *dev)
 {
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 383b3ca5f..a0a121f3a 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -1,6 +1,8 @@
 EXPERIMENTAL {
 	global:
 
+	rte_dmadevices;
+	rte_dmadev_pmd_allocate;
 	rte_dmadev_count;
 	rte_dmadev_get_dev_id;
 	rte_dmadev_socket_id;
@@ -19,6 +21,7 @@ EXPERIMENTAL {
 	rte_dmadev_fill_sg;
 	rte_dmadev_fence;
 	rte_dmadev_perform;
+	rte_dmadev_pmd_release;
 	rte_dmadev_completed;
 	rte_dmadev_completed_fails;
 	rte_dmadev_stats_get;
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 2/9] dmadev: change virtual addresses to IOVA
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 3/9] dmadev: add dump function Bruce Richardson
                     ` (7 subsequent siblings)
  9 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

For 32-bit builds, IOVAs are still 64-bit, so to ensure we can keep using
PA mode on 32-bit we need to convert all enqueue "void *" parameters to
rte_iova_t.
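
For example, an application copying between rte_malloc() buffers would
then do something like the following (a sketch):

	void *src = rte_malloc(NULL, len, 0);
	void *dst = rte_malloc(NULL, len, 0);

	/* pass IOVAs; in VA mode these equal the virtual addresses */
	rte_dmadev_copy(dev_id, vq_id, rte_malloc_virt2iova(src),
			rte_malloc_virt2iova(dst), len, 0);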

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/dmadev/rte_dmadev.h      | 8 ++++----
 lib/dmadev/rte_dmadev_core.h | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index f74fc6adb..1659ceaf2 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -133,8 +133,8 @@ typedef int32_t dma_cookie_t;
  * dma_scatterlist - can hold scatter DMA operation request
  */
 struct dma_scatterlist {
-	void *src;
-	void *dst;
+	rte_iova_t src;
+	rte_iova_t dst;
 	uint32_t length;
 };
 
@@ -505,7 +505,7 @@ rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
  */
 __rte_experimental
 static inline dma_cookie_t
-rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, void *src, void *dst,
+rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, rte_iova_t src, rte_iova_t dst,
 		uint32_t length, uint64_t flags)
 {
 	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
@@ -579,7 +579,7 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
 __rte_experimental
 static inline dma_cookie_t
 rte_dmadev_fill(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
-		void *dst, uint32_t length, uint64_t flags)
+		rte_iova_t dst, uint32_t length, uint64_t flags)
 {
 	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
 	return (*dev->fill)(dev, vq_id, pattern, dst, length, flags);
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index a3afea251..80b56ed83 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -17,7 +17,7 @@
 struct rte_dmadev;
 
 typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
-				      void *src, void *dst,
+				      rte_iova_t src, rte_iova_t dst,
 				      uint32_t length, uint64_t flags);
 /**< @internal Function used to enqueue a copy operation. */
 
@@ -27,7 +27,7 @@ typedef dma_cookie_t (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
 /**< @internal Function used to enqueue a scatter list copy operation. */
 
 typedef dma_cookie_t (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vq_id,
-				      uint64_t pattern, void *dst,
+				      uint64_t pattern, rte_iova_t dst,
 				      uint32_t length, uint64_t flags);
 /**< @internal Function used to enqueue a fill operation. */
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 3/9] dmadev: add dump function
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 2/9] dmadev: change virtual addresses to IOVA Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 4/9] dmadev: remove xstats functions Bruce Richardson
                     ` (6 subsequent siblings)
  9 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

A dump() function to print the state of a device to a file (e.g. stderr
or stdout) is very useful for debugging drivers.
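
Typical usage is simply (a sketch):

	/* print device state to stderr for debugging */
	rte_dmadev_dump(dev_id, stderr);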

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/dmadev/rte_dmadev.c     | 17 +++++++++++++++++
 lib/dmadev/rte_dmadev.h     | 19 +++++++++++++++++++
 lib/dmadev/rte_dmadev_pmd.h |  5 +++++
 lib/dmadev/version.map      |  1 +
 4 files changed, 42 insertions(+)

diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 855f4d272..ffd7c5b97 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -345,6 +345,23 @@ rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t nb_ids)
 	return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
 }
 
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n", dev->dev_id, dev->name,
+			dev->started ? "started" : "stopped");
+	fprintf(f, "  Driver: %s\n", dev->driver_name);
+	fprintf(f, "  Socket Id: %d\n", dev->socket_id);
+
+	if (dev->dev_ops->dump != NULL)
+		return (*dev->dev_ops->dump)(dev, f);
+	return 0;
+}
+
 int
 rte_dmadev_selftest(uint16_t dev_id)
 {
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 1659ceaf2..d64df17bd 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -357,6 +357,25 @@ __rte_experimental
 int
 rte_dmadev_close(uint16_t dev_id);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index ef03cf7cd..428ddc943 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -99,6 +99,9 @@ typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
 typedef int (*dmadev_reset_t)(struct rte_dmadev *dev);
 /**< @internal Function used to reset a configured device. */
 
+typedef int (*dmadev_dump_t)(struct rte_dmadev *dev, FILE *f);
+/**< @internal Function used to dump out the state of a device for debugging. */
+
 typedef int (*dmadev_queue_setup_t)(struct rte_dmadev *dev,
 				    const struct rte_dmadev_queue_conf *conf);
 /**< @internal Function used to allocate and set up a virt queue. */
@@ -147,6 +150,8 @@ struct rte_dmadev_ops {
 	dmadev_close_t dev_close;
 	/**< Reset device. */
 	dmadev_reset_t dev_reset;
+	/**< Dump device info for debugging */
+	dmadev_dump_t dump;
 
 	/**< Allocate and set up a virt queue. */
 	dmadev_queue_setup_t queue_setup;
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index a0a121f3a..ed051d54f 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -4,6 +4,7 @@ EXPERIMENTAL {
 	rte_dmadevices;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_count;
+	rte_dmadev_dump;
 	rte_dmadev_get_dev_id;
 	rte_dmadev_socket_id;
 	rte_dmadev_info_get;
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 4/9] dmadev: remove xstats functions
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
                     ` (2 preceding siblings ...)
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 3/9] dmadev: add dump function Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 5/9] dmadev: drop cookie typedef Bruce Richardson
                     ` (5 subsequent siblings)
  9 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

Remove the xstats function calls, as they are not needed for this class
as yet.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/dmadev/rte_dmadev.c     | 63 --------------------------
 lib/dmadev/rte_dmadev.h     | 89 -------------------------------------
 lib/dmadev/rte_dmadev_pmd.h | 19 --------
 lib/dmadev/version.map      |  3 --
 4 files changed, 174 deletions(-)

diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index ffd7c5b97..fed168675 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -282,69 +282,6 @@ rte_dmadev_stats_reset(uint16_t dev_id, int vq_id)
 	return (*dev->dev_ops->stats_reset)(dev, vq_id);
 }
 
-static int
-xstats_get_count(uint16_t dev_id)
-{
-	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
-
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
-
-	return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
-}
-
-int
-rte_dmadev_xstats_names_get(uint16_t dev_id,
-			    struct rte_dmadev_xstats_name *xstats_names,
-			    uint32_t size)
-{
-	struct rte_dmadev *dev;
-	int cnt_expected_entries;
-
-	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
-
-	cnt_expected_entries = xstats_get_count(dev_id);
-
-	if (xstats_names == NULL || cnt_expected_entries < 0 ||
-	    (int)size < cnt_expected_entries || size == 0)
-		return cnt_expected_entries;
-
-	dev = &rte_dmadevices[dev_id];
-
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get_names, -ENOTSUP);
-	return (*dev->dev_ops->xstats_get_names)(dev, xstats_names, size);
-}
-
-int
-rte_dmadev_xstats_get(uint16_t dev_id, const uint32_t ids[],
-		      uint64_t values[], uint32_t n)
-{
-	struct rte_dmadev *dev;
-
-	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
-	RTE_FUNC_PTR_OR_ERR_RET(ids, -EINVAL);
-	RTE_FUNC_PTR_OR_ERR_RET(values, -EINVAL);
-
-	dev = &rte_dmadevices[dev_id];
-
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_get, -ENOTSUP);
-
-	return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
-}
-
-int
-rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t nb_ids)
-{
-	struct rte_dmadev *dev;
-
-	RTE_DMADEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
-
-	dev = &rte_dmadevices[dev_id];
-
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->xstats_reset, -ENOTSUP);
-
-	return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
-}
-
 int
 rte_dmadev_dump(uint16_t dev_id, FILE *f)
 {
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index d64df17bd..2bfc0b619 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -824,95 +824,6 @@ __rte_experimental
 int
 rte_dmadev_stats_reset(uint16_t dev_id, int vq_id);
 
-/** Maximum name length for extended statistics counters */
-#define RTE_DMA_DEV_XSTATS_NAME_SIZE 64
-
-/**
- * A name-key lookup element for extended statistics.
- *
- * This structure is used to map between names and ID numbers
- * for extended ethdev statistics.
- */
-struct rte_dmadev_xstats_name {
-	char name[RTE_DMA_DEV_XSTATS_NAME_SIZE];
-};
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Retrieve names of extended statistics of a DMA device.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param[out] xstats_names
- *   Block of memory to insert names into. Must be at least size in capacity.
- *   If set to NULL, function returns required capacity.
- * @param size
- *   Capacity of xstats_names (number of names).
- * @return
- *   - positive value lower or equal to size: success. The return value
- *     is the number of entries filled in the stats table.
- *   - positive value higher than size: error, the given statistics table
- *     is too small. The return value corresponds to the size that should
- *     be given to succeed. The entries in the table are not valid and
- *     shall not be used by the caller.
- *   - negative value on error.
- */
-__rte_experimental
-int
-rte_dmadev_xstats_names_get(uint16_t dev_id,
-			    struct rte_dmadev_xstats_name *xstats_names,
-			    uint32_t size);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Retrieve extended statistics of a DMA device.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param ids
- *   The id numbers of the stats to get. The ids can be got from the stat
- *   position in the stat list from rte_dmadev_get_xstats_names().
- * @param[out] values
- *   The values for each stats request by ID.
- * @param n
- *   The number of stats requested.
- *
- * @return
- *   - positive value: number of stat entries filled into the values array.
- *   - negative value on error.
- */
-__rte_experimental
-int
-rte_dmadev_xstats_get(uint16_t dev_id, const uint32_t ids[],
-		      uint64_t values[], uint32_t n);
-
-/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
- * Reset the values of the xstats of the selected component in the device.
- *
- * @param dev_id
- *   The identifier of the device.
- * @param ids
- *   Selects specific statistics to be reset. When NULL, all statistics
- *   will be reset. If non-NULL, must point to array of at least
- *   *nb_ids* size.
- * @param nb_ids
- *   The number of ids available from the *ids* array. Ignored when ids is NULL.
- *
- * @return
- *   - zero: successfully reset the statistics to zero.
- *   - negative value on error.
- */
-__rte_experimental
-int
-rte_dmadev_xstats_reset(uint16_t dev_id, const uint32_t ids[], uint32_t nb_ids);
-
 /**
  * @warning
  * @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index 428ddc943..d0ec43af6 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -120,19 +120,6 @@ typedef int (*dmadev_stats_get_t)(struct rte_dmadev *dev, int vq_id,
 typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, int vq_id);
 /**< @internal Function used to reset basic statistics. */
 
-typedef int (*dmadev_xstats_get_names_t)(const struct rte_dmadev *dev,
-		struct rte_dmadev_xstats_name *xstats_names,
-		uint32_t size);
-/**< @internal Function used to get names of extended stats. */
-
-typedef int (*dmadev_xstats_get_t)(const struct rte_dmadev *dev,
-		const uint32_t ids[], uint64_t values[], uint32_t n);
-/**< @internal Function used to retrieve extended stats. */
-
-typedef int (*dmadev_xstats_reset_t)(struct rte_dmadev *dev,
-				     const uint32_t ids[], uint32_t nb_ids);
-/**< @internal Function used to reset extended stats. */
-
 typedef int (*dmadev_selftest_t)(uint16_t dev_id);
 /**< @internal Function used to start dmadev selftest. */
 
@@ -164,12 +151,6 @@ struct rte_dmadev_ops {
 	dmadev_stats_get_t stats_get;
 	/**< Reset basic statistics. */
 	dmadev_stats_reset_t stats_reset;
-	/**< Get names of extended stats. */
-	dmadev_xstats_get_names_t xstats_get_names;
-	/**< Get extended statistics. */
-	dmadev_xstats_get_t xstats_get;
-	/**< Reset extended statistics values. */
-	dmadev_xstats_reset_t xstats_reset;
 
 	/**< Device selftest function */
 	dmadev_selftest_t dev_selftest;
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index ed051d54f..a4d6b539a 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -27,9 +27,6 @@ EXPERIMENTAL {
 	rte_dmadev_completed_fails;
 	rte_dmadev_stats_get;
 	rte_dmadev_stats_reset;
-	rte_dmadev_xstats_names_get;
-	rte_dmadev_xstats_get;
-	rte_dmadev_xstats_reset;
 	rte_dmadev_selftest;
 
 	local: *;
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 5/9] dmadev: drop cookie typedef
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
                     ` (3 preceding siblings ...)
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 4/9] dmadev: remove xstats functions Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 6/9] dmadev: allow NULL parameters to completed ops call Bruce Richardson
                     ` (4 subsequent siblings)
  9 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

Rather than having a special type for the index values used in dmadev,
just use regular int types, with errors signalled via negative return values.
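
For example, an application can then check an enqueue result as follows (a
sketch; handle_error() is a hypothetical app-specific helper):

	int idx = rte_dmadev_copy(dev_id, vq_id, src, dst, length, flags);
	if (idx < 0)
		handle_error(idx); /* negative value is an error code */
	/* on success, idx is the job index in the range 0..UINT16_MAX */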

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/dmadev/rte_dmadev.h      | 59 ++++++++++++------------------------
 lib/dmadev/rte_dmadev_core.h | 12 ++++----
 2 files changed, 26 insertions(+), 45 deletions(-)

diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 2bfc0b619..8cfe14dd2 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -106,29 +106,6 @@ extern "C" {
 #include <rte_errno.h>
 #include <rte_compat.h>
 
-/**
- * dma_cookie_t - an opaque DMA cookie
- *
- * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's a error
- * code.
- * When using cookies, comply with the following rules:
- * a) Cookies for each virtual queue are independent.
- * b) For a virt queue, the cookie are monotonically incremented, when it reach
- *    the INT_MAX, it wraps back to zero.
- * c) The initial cookie of a virt queue is zero, after the device is stopped or
- *    reset, the virt queue's cookie needs to be reset to zero.
- * Example:
- *    step-1: start one dmadev
- *    step-2: enqueue a copy operation, the cookie return is 0
- *    step-3: enqueue a copy operation again, the cookie return is 1
- *    ...
- *    step-101: stop the dmadev
- *    step-102: start the dmadev
- *    step-103: enqueue a copy operation, the cookie return is 0
- *    ...
- */
-typedef int32_t dma_cookie_t;
-
 /**
  * dma_scatterlist - can hold scatter DMA operation request
  */
@@ -517,13 +494,14 @@ rte_dmadev_queue_info_get(uint16_t dev_id, uint16_t vq_id,
  *   An opaque flags for this operation.
  *
  * @return
- *   dma_cookie_t: please refer to the corresponding definition.
+ *   <0 on error,
+ *   on success, index of enqueued copy job, monotonically increasing between 0..UINT16_MAX
  *
  * NOTE: The caller must ensure that the input parameter is valid and the
  *       corresponding device supports the operation.
  */
 __rte_experimental
-static inline dma_cookie_t
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, rte_iova_t src, rte_iova_t dst,
 		uint32_t length, uint64_t flags)
 {
@@ -552,13 +530,14 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vq_id, rte_iova_t src, rte_iova_t dst,
  *   An opaque flags for this operation.
  *
  * @return
- *   dma_cookie_t: please refer to the corresponding definition.
+ *   <0 on error,
+ *   on success, index of enqueued copy job, monotonically increasing between 0..UINT16_MAX
  *
  * NOTE: The caller must ensure that the input parameter is valid and the
  *       corresponding device supports the operation.
  */
 __rte_experimental
-static inline dma_cookie_t
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
 		   const struct dma_scatterlist *sg,
 		   uint32_t sg_len, uint64_t flags)
@@ -590,13 +569,14 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
  *   An opaque flags for this operation.
  *
  * @return
- *   dma_cookie_t: please refer to the corresponding definition.
+ *   <0 on error,
+ *   on success, index of enqueued fill job, monotonically increasing between 0..UINT16_MAX
  *
  * NOTE: The caller must ensure that the input parameter is valid and the
  *       corresponding device supports the operation.
  */
 __rte_experimental
-static inline dma_cookie_t
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
 		rte_iova_t dst, uint32_t length, uint64_t flags)
 {
@@ -627,13 +607,14 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
  *   An opaque flags for this operation.
  *
  * @return
- *   dma_cookie_t: please refer to the corresponding definition.
+ *   <0 on error,
+ *   on success, index of enqueued fill job, monotonically increasing between 0..UINT16_MAX
  *
  * NOTE: The caller must ensure that the input parameter is valid and the
  *       corresponding device supports the operation.
  */
 __rte_experimental
-static inline dma_cookie_t
+static inline int
 rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vq_id, uint64_t pattern,
 		   const struct dma_scatterlist *sg, uint32_t sg_len,
 		   uint64_t flags)
@@ -716,8 +697,8 @@ rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
  *   The identifier of virt queue.
  * @param nb_cpls
  *   The maximum number of completed operations that can be processed.
- * @param[out] cookie
- *   The last completed operation's cookie.
+ * @param[out] last_idx
+ *   The last completed operation's index, as returned when entry was enqueued
  * @param[out] has_error
  *   Indicates if there are transfer error.
  *
@@ -730,11 +711,11 @@ rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
 __rte_experimental
 static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
-		     dma_cookie_t *cookie, bool *has_error)
+		     uint16_t *last_idx, bool *has_error)
 {
 	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
 	has_error = false;
-	return (*dev->completed)(dev, vq_id, nb_cpls, cookie, has_error);
+	return (*dev->completed)(dev, vq_id, nb_cpls, last_idx, has_error);
 }
 
 /**
@@ -752,8 +733,8 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
  *   Indicates the size of status array.
  * @param[out] status
  *   The error code of operations that failed to complete.
- * @param[out] cookie
- *   The last failed completed operation's cookie.
+ * @param[out] last_idx
+ *   The last failed completed operation's index.
  *
  * @return
  *   The number of operations that failed to complete.
@@ -765,10 +746,10 @@ __rte_experimental
 static inline uint16_t
 rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
 			   const uint16_t nb_status, uint32_t *status,
-			   dma_cookie_t *cookie)
+			   uint16_t *last_idx)
 {
 	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
-	return (*dev->completed_fails)(dev, vq_id, nb_status, status, cookie);
+	return (*dev->completed_fails)(dev, vq_id, nb_status, status, last_idx);
 }
 
 struct rte_dmadev_stats {
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 80b56ed83..7fbefe8f9 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -16,22 +16,22 @@
 
 struct rte_dmadev;
 
-typedef dma_cookie_t (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
+typedef int (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vq_id,
 				      rte_iova_t src, rte_iova_t dst,
 				      uint32_t length, uint64_t flags);
 /**< @internal Function used to enqueue a copy operation. */
 
-typedef dma_cookie_t (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
+typedef int (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
 					 const struct dma_scatterlist *sg,
 					 uint32_t sg_len, uint64_t flags);
 /**< @internal Function used to enqueue a scatter list copy operation. */
 
-typedef dma_cookie_t (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vq_id,
+typedef int (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vq_id,
 				      uint64_t pattern, rte_iova_t dst,
 				      uint32_t length, uint64_t flags);
 /**< @internal Function used to enqueue a fill operation. */
 
-typedef dma_cookie_t (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
+typedef int (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vq_id,
 			uint64_t pattern, const struct dma_scatterlist *sg,
 			uint32_t sg_len, uint64_t flags);
 /**< @internal Function used to enqueue a scatter list fill operation. */
@@ -44,12 +44,12 @@ typedef int (*dmadev_perform_t)(struct rte_dmadev *dev, uint16_t vq_id);
 
 typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vq_id,
 				       const uint16_t nb_cpls,
-				       dma_cookie_t *cookie, bool *has_error);
+				       uint16_t *last_idx, bool *has_error);
 /**< @internal Function used to return number of successful completed operations */
 
 typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
 			uint16_t vq_id, const uint16_t nb_status,
-			uint32_t *status, dma_cookie_t *cookie);
+			uint32_t *status, uint16_t *last_idx);
 /**< @internal Function used to return number of failed completed operations */
 
 #define RTE_DMADEV_NAME_MAX_LEN	64 /**< Max length of name of DMA PMD */
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 6/9] dmadev: allow NULL parameters to completed ops call
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
                     ` (4 preceding siblings ...)
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 5/9] dmadev: drop cookie typedef Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 7/9] dmadev: stats structure updates Bruce Richardson
                     ` (3 subsequent siblings)
  9 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

Allow the user to skip passing the "out" parameters to the
rte_dmadev_completed() API call, by using local replacements in the
inline function. This simplifies drivers, and compilers should be able
to remove the branches at compile time in many cases.
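
For example, an application interested only in the completion count can then
call (a sketch; BURST_SZ stands for an arbitrary application-chosen value):

	/* neither the last job index nor the error flag is needed here */
	uint16_t n = rte_dmadev_completed(dev_id, vq_id, BURST_SZ, NULL, NULL);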

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/dmadev/rte_dmadev.h | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 8cfe14dd2..eb78f3805 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -698,9 +698,11 @@ rte_dmadev_perform(uint16_t dev_id, uint16_t vq_id)
  * @param nb_cpls
  *   The maximum number of completed operations that can be processed.
  * @param[out] last_idx
- *   The last completed operation's index, as returned when entry was enqueued
+ *   The last completed operation's index, as returned when entry was enqueued.
+ *   If not required, NULL can be passed in.
  * @param[out] has_error
  *   Indicates if there are transfer error.
+ *   If not required, may be passed as NULL.
  *
  * @return
  *   The number of operations that successful completed.
@@ -714,7 +716,20 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vq_id, const uint16_t nb_cpls,
 		     uint16_t *last_idx, bool *has_error)
 {
 	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
-	has_error = false;
+	bool err = false;
+	uint16_t idx;
+
+	/* ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the value is NULL
+	 * - If address of local variable is passed as parameter, then compiler can
+	 *   know it's non-NULL.
+	 */
+	if (has_error == NULL)
+		has_error = &err;
+	if (last_idx == NULL)
+		last_idx = &idx;
+
 	return (*dev->completed)(dev, vq_id, nb_cpls, last_idx, has_error);
 }
 
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 7/9] dmadev: stats structure updates
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
                     ` (5 preceding siblings ...)
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 6/9] dmadev: allow NULL parameters to completed ops call Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 8/9] drivers: add dma driver category Bruce Richardson
                     ` (2 subsequent siblings)
  9 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

Drop the failed enqueue count, since that is best tracked by the
application, so that retries of the same job can be counted as desired by
the app developer. Since the "doorbell" function is separate from the
functions that actually enqueue descriptors, track a separate stat for jobs
which were submitted to hardware, as the "enqueued" count may include
jobs which have not yet been "doorbelled".
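
As a sketch of the intended accounting (assuming all enqueues succeed; the
loop variables and buffers are illustrative only):

	for (i = 0; i < n; i++) /* each call increments enqueued_count */
		rte_dmadev_copy(dev_id, vq_id, src[i], dst[i], len, 0);
	/* the doorbell moves those n jobs into submitted_count */
	rte_dmadev_perform(dev_id, vq_id);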

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 lib/dmadev/rte_dmadev.h | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index eb78f3805..bdb531a53 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -768,14 +768,10 @@ rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
 }
 
 struct rte_dmadev_stats {
-	uint64_t enqueue_fail_count;
-	/**< Conut of all operations which failed enqueued */
-	uint64_t enqueued_count;
-	/**< Count of all operations which successful enqueued */
-	uint64_t completed_fail_count;
-	/**< Count of all operations which failed to complete */
-	uint64_t completed_count;
-	/**< Count of all operations which successful complete */
+	uint64_t enqueued_count;       /**< Count of operations which were successfully enqueued */
+	uint64_t submitted_count;      /**< Count of operations which were submitted to hardware */
+	uint64_t completed_fail_count; /**< Count of operations which failed to complete */
+	uint64_t completed_count;      /**< Count of operations which completed successfully */
 };
 
 /**
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 8/9] drivers: add dma driver category
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
                     ` (6 preceding siblings ...)
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 7/9] dmadev: stats structure updates Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 9/9] app/test: add basic dmadev unit test Bruce Richardson
  2021-07-07  3:16   ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates fengchengwen
  9 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/dma/meson.build | 11 +++++++++++
 1 file changed, 11 insertions(+)
 create mode 100644 drivers/dma/meson.build

diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 000000000..986b28be5
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 Intel Corporation
+
+if is_windows
+    subdir_done()
+endif
+
+drivers = [
+]
+
+std_deps = ['dmadev']
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [RFC UPDATE PATCH 9/9] app/test: add basic dmadev unit test
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
                     ` (7 preceding siblings ...)
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 8/9] drivers: add dma driver category Bruce Richardson
@ 2021-07-06 20:28   ` Bruce Richardson
  2021-07-07  3:16   ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates fengchengwen
  9 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-06 20:28 UTC (permalink / raw)
  To: dev
  Cc: Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup,
	Bruce Richardson

Add in some basic dmadev unit tests for testing drivers and the library
itself.

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 app/test/meson.build   |   2 +
 app/test/test_dmadev.c | 320 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 322 insertions(+)
 create mode 100644 app/test/test_dmadev.c

diff --git a/app/test/meson.build b/app/test/meson.build
index 0a5f42557..223ca210a 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -36,6 +36,7 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -155,6 +156,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 000000000..df301b385
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 Intel Corporation
+ */
+
+#include <unistd.h>
+
+#include <rte_mbuf.h>
+#include <rte_dmadev.h>
+#include "test.h"
+
+#define COPY_LEN 1024
+
+static struct rte_mempool *pool;
+static uint16_t id_count = 0;
+
+#define PRINT_ERR(...) print_err(__func__, __LINE__, __VA_ARGS__)
+
+static inline int
+__rte_format_printf(3, 4)
+print_err(const char *func, int lineno, const char *format, ...)
+{
+	va_list ap;
+	int ret;
+
+	ret = fprintf(stderr, "In %s:%d - ", func, lineno);
+	va_start(ap, format);
+	ret += vfprintf(stderr, format, ap);
+	va_end(ap);
+
+	return ret;
+}
+
+static int
+do_multi_copies(int dev_id, int split_batches, int split_completions)
+{
+	struct rte_mbuf *srcs[32], *dsts[32];
+	unsigned int i, j;
+	bool dma_err = false;
+
+	for (i = 0; i < RTE_DIM(srcs); i++) {
+		char *src_data;
+
+		if (split_batches && i == RTE_DIM(srcs) / 2)
+			rte_dmadev_perform(dev_id, 0);
+
+		srcs[i] = rte_pktmbuf_alloc(pool);
+		dsts[i] = rte_pktmbuf_alloc(pool);
+		if (srcs[i] == NULL || dsts[i] == NULL) {
+			PRINT_ERR("Error allocating buffers\n");
+			return -1;
+		}
+		src_data = rte_pktmbuf_mtod(srcs[i], char *);
+
+		for (j = 0; j < COPY_LEN; j++)
+			src_data[j] = rand() & 0xFF;
+
+		if (rte_dmadev_copy(dev_id, 0, srcs[i]->buf_iova + srcs[i]->data_off,
+				dsts[i]->buf_iova + dsts[i]->data_off, COPY_LEN, 0) != id_count++) {
+			PRINT_ERR("Error with rte_dmadev_copy for buffer %u\n", i);
+			return -1;
+		}
+	}
+	rte_dmadev_perform(dev_id, 0);
+	usleep(100);
+
+	if (split_completions) {
+		/* gather completions in two halves */
+		uint16_t half_len = RTE_DIM(srcs) / 2;
+		int ret = rte_dmadev_completed(dev_id, 0, half_len, NULL, &dma_err);
+		if (ret != half_len || dma_err) {
+			PRINT_ERR("Error with rte_dmadev_completed - first half. ret = %d, expected ret = %u, dma_err = %d\n",
+					ret, half_len, dma_err);
+			rte_dmadev_dump(dev_id, stdout);
+			return -1;
+		}
+		ret = rte_dmadev_completed(dev_id, 0, half_len, NULL, &dma_err);
+		if (ret != half_len || dma_err) {
+			PRINT_ERR("Error with rte_dmadev_completed - second half. ret = %d, expected ret = %u, dma_err = %d\n",
+					ret, half_len, dma_err);
+			rte_dmadev_dump(dev_id, stdout);
+			return -1;
+		}
+	} else {
+		/* gather all completions in one go */
+		if ((j = rte_dmadev_completed(dev_id, 0, RTE_DIM(srcs), NULL,
+				&dma_err)) != RTE_DIM(srcs) || dma_err) {
+			PRINT_ERR("Error with rte_dmadev_completed, %u [expected: %zu], dma_err = %d\n",
+					j, RTE_DIM(srcs), dma_err);
+			rte_dmadev_dump(dev_id, stdout);
+			return -1;
+		}
+	}
+
+	/* check for empty */
+	if (rte_dmadev_completed(dev_id, 0, RTE_DIM(srcs), NULL, &dma_err) != 0 || dma_err) {
+		PRINT_ERR("Error with rte_dmadev_completed - ops unexpectedly returned\n");
+		rte_dmadev_dump(dev_id, stdout);
+		return -1;
+	}
+
+	for (i = 0; i < RTE_DIM(srcs); i++) {
+		char *src_data, *dst_data;
+
+		src_data = rte_pktmbuf_mtod(srcs[i], char *);
+		dst_data = rte_pktmbuf_mtod(dsts[i], char *);
+		for (j = 0; j < COPY_LEN; j++)
+			if (src_data[j] != dst_data[j]) {
+				PRINT_ERR("Error with copy of packet %u, byte %u\n", i, j);
+				return -1;
+			}
+		rte_pktmbuf_free(srcs[i]);
+		rte_pktmbuf_free(dsts[i]);
+	}
+	return 0;
+}
+
+static int
+test_enqueue_copies(int dev_id)
+{
+	unsigned int i;
+	uint16_t id;
+
+	/* test doing a single copy */
+	do {
+		struct rte_mbuf *src, *dst;
+		char *src_data, *dst_data;
+
+		src = rte_pktmbuf_alloc(pool);
+		dst = rte_pktmbuf_alloc(pool);
+		src_data = rte_pktmbuf_mtod(src, char *);
+		dst_data = rte_pktmbuf_mtod(dst, char *);
+
+		for (i = 0; i < COPY_LEN; i++)
+			src_data[i] = rand() & 0xFF;
+
+		id = rte_dmadev_copy(dev_id, 0, src->buf_iova + src->data_off,
+				dst->buf_iova + dst->data_off, COPY_LEN, 0);
+		if (id != id_count) {
+			PRINT_ERR("Error with rte_dmadev_copy, got %u, expected %u\n",
+					id, id_count);
+			return -1;
+		}
+		if (rte_dmadev_perform(dev_id, 0) < 0) {
+			PRINT_ERR("Error with rte_dmadev_perform\n");
+			return -1;
+		}
+		/* give time for copy to finish, then check it was done */
+		usleep(10);
+
+		for (i = 0; i < COPY_LEN; i++) {
+			if (dst_data[i] != src_data[i]) {
+				PRINT_ERR("Data mismatch at char %u [Got %02x not %02x]\n", i,
+						dst_data[i], src_data[i]);
+				rte_dmadev_dump(dev_id, stderr);
+				return -1;
+			}
+		}
+
+		/* now check completion works */
+		if (rte_dmadev_completed(dev_id, 0, 1, &id, NULL) != 1) {
+			PRINT_ERR("Error with rte_dmadev_completed\n");
+			return -1;
+		}
+		if (id != id_count) {
+			PRINT_ERR("Error:incorrect job id received, %u [expected %u]\n", id, id_count);
+			return -1;
+		}
+
+		rte_pktmbuf_free(src);
+		rte_pktmbuf_free(dst);
+
+		/* now check completion works */
+		if (rte_dmadev_completed(dev_id, 0, 1, NULL, NULL) != 0) {
+			PRINT_ERR("Error with rte_dmadev_completed in empty check\n");
+			return -1;
+		}
+		id_count++;
+
+	} while (0);
+
+	/* test doing multiple single copies */
+	do {
+		const uint16_t max_ops = 4;
+		struct rte_mbuf *src, *dst;
+		char *src_data, *dst_data;
+
+		src = rte_pktmbuf_alloc(pool);
+		dst = rte_pktmbuf_alloc(pool);
+		src_data = rte_pktmbuf_mtod(src, char *);
+		dst_data = rte_pktmbuf_mtod(dst, char *);
+
+		for (i = 0; i < COPY_LEN; i++)
+			src_data[i] = rand() & 0xFF;
+
+		/* perform the same copy <max_ops> times */
+		for (i = 0; i < max_ops; i++) {
+			if (rte_dmadev_copy(dev_id, 0,
+					src->buf_iova + src->data_off,
+					dst->buf_iova + dst->data_off,
+					COPY_LEN, 0) != id_count++) {
+				PRINT_ERR("Error with rte_dmadev_copy\n");
+				return -1;
+			}
+			rte_dmadev_perform(dev_id, 0);
+		}
+		usleep(10);
+
+		if ((i = rte_dmadev_completed(dev_id, 0, max_ops * 2, &id, NULL)) != max_ops) {
+			PRINT_ERR("Error with rte_dmadev_completed, got %u not %u\n", i, max_ops);
+			return -1;
+		}
+		if (id != id_count - 1) {
+			PRINT_ERR("Error, incorrect job id returned: got %u not %u\n", id, id_count - 1);
+			return -1;
+		}
+		for (i = 0; i < COPY_LEN; i++) {
+			if (dst_data[i] != src_data[i]) {
+				PRINT_ERR("Data mismatch at char %u\n", i);
+				return -1;
+			}
+		}
+		rte_pktmbuf_free(src);
+		rte_pktmbuf_free(dst);
+	} while (0);
+
+	/* test doing multiple copies */
+	return do_multi_copies(dev_id, 0, 0) /* enqueue and complete one batch at a time */
+			|| do_multi_copies(dev_id, 1, 0) /* enqueue 2 batches and then complete both */
+			|| do_multi_copies(dev_id, 0, 1); /* enqueue 1 batch, then complete in two halves */
+}
+
+static int
+test_dmadev_instance(uint16_t dev_id)
+{
+#define TEST_RINGSIZE 512
+	struct rte_dmadev_info info;
+	struct rte_dmadev_conf conf = { .nb_hw_queues = 1};
+	struct rte_dmadev_queue_conf qconf = { .nb_desc = TEST_RINGSIZE };
+	int i;
+
+	rte_dmadev_info_get(dev_id, &info);
+	if (info.max_hw_queues < 1) {
+		PRINT_ERR("Error, no queues reported on device id %u\n", dev_id);
+		return -1;
+	}
+	if (rte_dmadev_configure(dev_id, &conf) != 0) {
+		PRINT_ERR("Error with rte_rawdev_configure()\n");
+		return -1;
+	}
+	if (rte_dmadev_queue_setup(dev_id, &qconf) != 0) {
+		PRINT_ERR("Error with queue configuration\n");
+		return -1;
+	}
+	rte_dmadev_info_get(dev_id, &info);
+	if (info.nb_hw_queues != 1) {
+		PRINT_ERR("Error, no configured queues reported on device id %u\n", dev_id);
+		return -1;
+	}
+
+	if (rte_dmadev_start(dev_id) != 0) {
+		PRINT_ERR("Error with rte_rawdev_start()\n");
+		return -1;
+	}
+	id_count = 0;
+
+	/* create a mempool for running tests */
+	pool = rte_pktmbuf_pool_create("TEST_DMADEV_POOL",
+			TEST_RINGSIZE * 2, /* n == num elements */
+			32,  /* cache size */
+			0,   /* priv size */
+			2048, /* data room size */
+			info.socket_id);
+	if (pool == NULL) {
+		PRINT_ERR("Error with mempool creation\n");
+		return -1;
+	}
+
+	/* run the test cases */
+	printf("DMA Dev: %u, Running Copy Tests\n", dev_id);
+	for (i = 0; i < 768; i++) {
+		struct rte_dmadev_stats stats;
+
+		if (test_enqueue_copies(dev_id) != 0) {
+			printf("Error with iteration %d\n", i);
+			rte_dmadev_dump(dev_id, stdout);
+			goto err;
+		}
+
+		rte_dmadev_stats_get(dev_id, 0, &stats);
+		printf("Ops enqueued: %"PRIu64"\t", stats.enqueued_count);
+		printf("Ops completed: %"PRIu64"\r", stats.completed_count);
+	}
+	printf("\n");
+
+	rte_mempool_free(pool);
+	rte_dmadev_stop(dev_id);
+
+	return 0;
+
+err:
+	rte_mempool_free(pool);
+	rte_dmadev_stop(dev_id);
+	return -1;
+}
+
+static int
+test_dmadevs(void)
+{
+	int i;
+
+	if (rte_dmadev_count() == 0)
+		return TEST_SKIPPED;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++)
+		if (rte_dmadevices[i].attached && test_dmadev_instance(i) < 0)
+			return -1;
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dmadevs);
-- 
2.30.2


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
                     ` (8 preceding siblings ...)
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 9/9] app/test: add basic dmadev unit test Bruce Richardson
@ 2021-07-07  3:16   ` fengchengwen
  2021-07-07  8:11     ` Bruce Richardson
                       ` (2 more replies)
  9 siblings, 3 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-07  3:16 UTC (permalink / raw)
  To: Bruce Richardson, dev; +Cc: Jerin Jacob, Jerin Jacob, Morten Brørup

LGTM, thanks

And I'm preparing dmadev V2, which includes:
a) Fix code review comments (e.g. multi-process support, doxygen, comments, typos)
b) Flatten the device abstraction to two layers: dmadev <> vchan
c) Public APIs use dev_id and vchan_id to locate one vchan
d) Use the flags parameter instead of the fence API
e) Rename rte_dmadev_perform to rte_dmadev_submit so it corresponds to the stats variable.

PS: Some code (lib/dmadev) will be rebased on this patchset


On 2021/7/7 4:28, Bruce Richardson wrote:
> This patchset contains a series of changes to dmadev based on work being done to
> port over our drivers to test this new infrastructure. Some of these are bug
> fixes to enable compilation e.g. missing exports or meson.build files, while
> others are suggested changes to enhance the API. All these patches are to be
> applied on top of [1] as they are mostly suggested changes to that RFC i.e.
> patches to the patch!
> 
> The final patch includes some basic sanity tests for copy operations that we
> have ported over from the ioat self-tests to use the dmadev APIs. The basic
> dataplane part of those tests is probably ok for now, but the initialization of
> queues in that test code may need some enhancement. Feedback welcome.
> 
> A tree with all these patches applied can be got at [2] if anyone wants to use
> that as a basis for working on drivers, or for other discussion.
> 
> [1] http://patches.dpdk.org/project/dpdk/patch/1625231891-2963-1-git-send-email-fengchengwen@huawei.com/
> [2] https://github.com/bruce-richardson/dpdk/tree/dmadev-rfcs
> 
> Bruce Richardson (9):
>   dmadev: add missing exports
>   dmadev: change virtual addresses to IOVA
>   dmadev: add dump function
>   dmadev: remove xstats functions
>   dmadev: drop cookie typedef
>   dmadev: allow NULL parameters to completed ops call
>   dmadev: stats structure updates
>   drivers: add dma driver category
>   app/test: add basic dmadev unit test
> 
>  app/test/meson.build         |   2 +
>  app/test/test_dmadev.c       | 320 +++++++++++++++++++++++++++++++++++
>  drivers/dma/meson.build      |  11 ++
>  drivers/meson.build          |   1 +
>  lib/dmadev/rte_dmadev.c      |  66 ++------
>  lib/dmadev/rte_dmadev.h      | 204 +++++++---------------
>  lib/dmadev/rte_dmadev_core.h |  16 +-
>  lib/dmadev/rte_dmadev_pmd.h  |  24 +--
>  lib/dmadev/version.map       |   7 +-
>  9 files changed, 425 insertions(+), 226 deletions(-)
>  create mode 100644 app/test/test_dmadev.c
>  create mode 100644 drivers/dma/meson.build
> 
> --
> 2.30.2
> 
> 
> .
> 


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-05 17:16       ` Bruce Richardson
@ 2021-07-07  8:08         ` Jerin Jacob
  2021-07-07  8:35           ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-07  8:08 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Mon, Jul 5, 2021 at 10:46 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Mon, Jul 05, 2021 at 09:25:34PM +0530, Jerin Jacob wrote:
> >
> > On Mon, Jul 5, 2021 at 4:22 PM Bruce Richardson
> > <bruce.richardson@intel.com> wrote:
> > >
> > > On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> > > > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > > >
> > > > > This patch introduces 'dmadevice' which is a generic type of DMA
> > > > > device.
> <snip>
> > >
> > > +1 and the terminology with regards to queues and channels. With our ioat
> > > hardware, each HW queue was called a channel for instance.
> >
> > Looks like <dmadev> <> <channel> can cover all the use cases, if the
> > HW has more than
> > 1 queues it can be exposed as separate dmadev dev.
> >
>
> Fine for me.
>
> However, just to confirm that Morten's suggestion of using a
> (device-specific void *) channel pointer rather than dev_id + channel_id
> pair of parameters won't work for you? You can't store a pointer or dev
> index in the channel struct in the driver?

Yes, that will work. To confirm, the suggestion is to use a void *
object instead of channel_id.
That will avoid one more indirection (index -> pointer).


>
> >
> <snip>
> > > > > + *
> > > > > + * If dma_cookie_t is >=0 it's a DMA operation request cookie, <0 it's a error
> > > > > + * code.
> > > > > + * When using cookies, comply with the following rules:
> > > > > + * a) Cookies for each virtual queue are independent.
> > > > > + * b) For a virt queue, the cookie are monotonically incremented, when it reach
> > > > > + *    the INT_MAX, it wraps back to zero.
> > >
> > > I disagree with the INT_MAX (or INT32_MAX) value here. If we use that
> > > value, it means that we cannot use implicit wrap-around inside the CPU and
> > > have to check for the INT_MAX value. Better to:
> > > 1. Specify that it wraps at UINT16_MAX which allows us to just use a
> > > uint16_t internally and wrap-around automatically, or:
> > > 2. Specify that it wraps at a power-of-2 value >= UINT16_MAX, giving
> > > drivers the flexibility at what value to wrap around.
> >
> > I think (2) is better than (1). Even better, I think, to wrap around the number of
> > descriptors configured in dev_configure() (we can make this a power of 2),
> >
>
> Interesting, I hadn't really considered that before. My only concern
> would be if an app wants to keep values in the app ring for a while after
> they have been returned from dmadev. I thought it easier to have the full
> 16-bit counter value returned to the user to give the most flexibility,
> given that going from that to any power-of-2 ring size smaller is a trivial
> operation.
>
> Overall, while my ideal situation is to always have a 0..UINT16_MAX return
> value from the function, I can live with your suggestion of wrapping at
> ring_size, since drivers will likely do that internally anyway.
> I think wrapping at INT32_MAX is too awkward and will be error prone since
> we can't rely on hardware automatically wrapping to zero, nor on the driver
> having pre-masked the value.

OK. +1 for UINT16_MAX
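
For example, masking the full 16-bit index down to a power-of-2 app ring is
trivial (a sketch; the ring, its size and the job struct are
application-defined, and the mask works because 65536 is a multiple of any
such power-of-2 size):

	#define APP_RING_SZ 1024 /* power of 2 */
	uint16_t last_idx;
	rte_dmadev_completed(dev_id, vq_id, batch, &last_idx, NULL);
	struct app_job *job = &app_ring[last_idx & (APP_RING_SZ - 1)];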

>
> > >
> > > > > + * c) The initial cookie of a virt queue is zero, after the device is stopped or
> > > > > + *    reset, the virt queue's cookie needs to be reset to zero.
> <snip>
> > > >
> > > > Please add some good amount of reserved bits and have API to init this
> > > > structure for future ABI stability, say rte_dmadev_queue_config_init()
> > > > or so.
> > > >
> > >
> > > I don't think that is necessary. Since the config struct is used only as
> > > parameter to the config function, any changes to it can be managed by
> > > versioning that single function. Padding would only be necessary if we had
> > > an array of these config structs somewhere.
> >
> > OK.
> >
> > For some reason, the versioning API looks ugly to me in code, while keeping
> > some rsvd fields looks cool to me with an init function.
> >
> > But I agree, function versioning works in this case. No need to find another API
> > if it is not general DPDK API practice.
> >
>
> The one thing I would suggest instead of the padding is for the internal
> APIS, to pass the struct size through, since we can't version those - and
> for padding we can't know whether any replaced padding should be used or
> not. Specifically:
>
>         typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev, struct
>                         rte_dmadev_conf *cfg, size_t cfg_size);
>
> but for the public function:
>
>         int
>         rte_dmadev_configure(struct rte_dmadev *dev, struct
>                         rte_dmadev_conf *cfg)
>         {
>                 ...
>                 ret = dev->ops.configure(dev, cfg, sizeof(*cfg));
>                 ...
>         }

Makes sense.
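
A driver could then branch on the passed size, e.g. (a sketch; the PMD
function and helper names are hypothetical):

	static int
	my_pmd_configure(struct rte_dmadev *dev, struct rte_dmadev_conf *cfg,
			size_t cfg_size)
	{
		/* a binary built against an older struct passes a smaller
		 * size, so newer trailing fields must be defaulted rather
		 * than read from *cfg */
		if (cfg_size < sizeof(struct rte_dmadev_conf))
			return my_pmd_configure_compat(dev, cfg, cfg_size);
		/* full current struct is available */
		...
	}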

>
> Then if we change the structure and version the config API, the driver can
> tell from the size what struct version it is and act accordingly. Without
> that, each time the struct changed, we'd have to add a new function pointer
> to the device ops.
>
> > In other libraries, I have seen such an _init function that can be used
> > for this, as well as for filling in default values
> > (in some cases the implementation's default values are not zero),
> > so that the application can avoid a memset for the param structure.
> > Added rte_event_queue_default_conf_get() in the eventdev spec for this.
> >
>
> I think that would largely have the same issues, unless it returned a
> pointer to data inside the driver - and which therefore could not be
> modified. Alternatively it would mean that the memory would have been
> allocated in the driver and we would need to ensure proper cleanup
> functions were called to free memory afterwards. Supporting having the
> config parameter as a local variable I think makes things a lot easier.
>
> > No strong opinion on this.
> >
> >
> >
> > >
> > > >
> > > > > +
> > > > > +/**
> > > > > + * A structure used to retrieve information of a DMA virt queue.
> > > > > + */
> > > > > +struct rte_dmadev_queue_info {
> > > > > +       enum dma_transfer_direction direction;
> > > >
> > > > A queue may support all directions so I think it should be a bitfield.
> > > >
> > > > > +       /**< Associated transfer direction */
> > > > > +       uint16_t hw_queue_id; /**< The HW queue on which to create virt queue */
> > > > > +       uint16_t nb_desc; /**< Number of descriptor for this virt queue */
> > > > > +       uint64_t dev_flags; /**< Device specific flags */
> > > > > +};
> > > > > +
> > > >
> > > > > +__rte_experimental
> > > > > +static inline dma_cookie_t
> > > > > +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id,
> > > > > +                  const struct dma_scatterlist *sg,
> > > > > +                  uint32_t sg_len, uint64_t flags)
> > > >
> > > > I would like to change this as:
> > > > rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vq_id, const struct
> > > > rte_dma_sg *src, uint32_t nb_src,
> > > > const struct rte_dma_sg *dst, uint32_t nb_dst) or so allow the use case like

In the above syntax, @Chengchang Tang,
rte_dma_sg needs to contain only ptr and size.

> > > > src 30 MB copy can be splitted as written as 1 MB x 30 dst.
> > > >
>
> Out of interest, do you see much benefit (and in what way) from having the
> scatter-gather support? Unlike sending 5 buffers in one packet rather than
> 5 buffers in 5 packets to a NIC, copying an array of memory in one op vs
> multiple is functionally identical.

Knowing all the segments upfront, in one shot (as sketched below), allows
better optimization
in drivers, like:
1) One DMA job request in which HW can fill multiple segments, vs multiple
DMA job requests with one per segment.
2) A single completion, i.e. less overhead for the system.
3) Less latency for the job requests.
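
As a sketch of that proposed rte_dmadev_copy_sg() call (the addr/len field
names in rte_dma_sg are assumptions, not settled API):

	struct rte_dma_sg {
		rte_iova_t addr;
		uint32_t len;
	};

	/* one 30 MB source fanned out to 30 x 1 MB destinations */
	struct rte_dma_sg src = { .addr = src_iova, .len = 30 * 1024 * 1024 };
	struct rte_dma_sg dst[30]; /* each entry covering 1 MB, filled elsewhere */
	rte_dmadev_copy_sg(dev_id, vq_id, &src, 1, dst, 30);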


>
> > > >
> > > >
> <snip>
> > Got it. In order to save space in the first CL for fastpath (saving 8B
> > for the pointer) and to avoid
> > function overhead, can we use one bit of the op function's flags to
> > enable the fence?
> >
>
> The original ioat implementation did exactly that. However, I then
> discovered that because a fence logically belongs between two operations,
> does the fence flag on an operation mean "don't do any jobs after this
> until this job has completed" or does it mean "don't start this job until
> all previous jobs have completed". [Or theoretically does it mean both :-)]
> Naturally, some hardware does it the former way (i.e. fence flag goes on
> last op before fence), while other hardware the latter way (i.e. fence flag
> goes on first op after the fence). Therefore, since fencing is about
> ordering *between* two (sets of) jobs, I decided that it should do exactly
> that and go between two jobs, so there is no ambiguity!
>
> However, I'm happy enough to switch to having a fence flag, but I think if
> we do that, it should be put in the "first job after fence" case, because
> it is always easier to modify a previously written job if we need to, than
> to save the flag for a future one.
>
> Alternatively, if we keep the fence as a separate function, I'm happy
> enough for it not to be on the same cacheline as the "hot" operations,
> since fencing will always introduce a small penalty anyway.

Ack.
You may consider two flags, FENCE_THEN_JOB and JOB_THEN_FENCE (if
there is any use case for this, or it makes sense for your HW).


For us, the fence is a NOP, as we have an implicit fence between each
HW job descriptor.
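
For example, with a hypothetical fence flag set on the first job after the
fence point (the flag name is illustrative only):

	rte_dmadev_copy(dev_id, vq_id, src, mid, len, 0);
	/* mid must be fully written before the next copy may start */
	rte_dmadev_copy(dev_id, vq_id, mid, dst, len, RTE_DMA_FLAG_FENCE);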


>
> > >
> > > >
> <snip>
> > > > Since we have additional function call overhead in all the
> > > > applications for this scheme, I would like to understand
> > > > the use of doing this way vs enq does the doorbell implicitly from
> > > > driver/application PoV?
> > > >
> > >
> > > In our benchmarks it's just faster. When we tested it, the overhead of the
> > > function calls was noticably less than the cost of building up the
> > > parameter array(s) for passing the jobs in as a burst. [We don't see this
> > > cost with things like NIC I/O since DPDK tends to already have the mbuf
> > > fully populated before the TX call anyway.]
> >
> > OK. I agree with stack population.
> >
> > My question was more about doing an implicit doorbell update on enqueue. Is a doorbell write
> > costly in other HW compared to a function call? In our HW, it is just a write of
> > the number of instructions written to a register.
> >
> > Also, we need to again access the internal PMD memory structure to find
> > where to write etc if it is a separate function.
> >
>
> The cost varies depending on a number of factors - even writing to a single
> HW register can be very slow if that register is mapped as device
> (uncacheable) memory, since (AFAIK) it will act as a full fence and wait

I don't know. At least in our case, writes are write-back, so the core does
not need to wait (if there is no read operation).

> for the write to go all the way to hardware. For more modern HW, the cost
> can be lighter. However, any cost of HW writes is going to be the same
> whether its a separate function call or not.
>
> However, the main thing about the doorbell update is that it's a
> once-per-burst thing, rather than a once-per-job. Therefore, even if you
> have to re-read the struct memory (which is likely still somewhere in your
> cores' cache), any extra small cost of doing so is to be amortized over the
> cost of a whole burst of copies.

The Linux kernel has the xmit_more flag in the skb to address a similar thing,
i.e. the enqueue job flags can have one more bit to say whether to ring the
doorbell or not, rather than having yet another function call's overhead.
IMO, it is the best of both worlds.
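
For example (the flag name is hypothetical), the last job of a burst could
carry the doorbell request:

	for (i = 0; i < n; i++) {
		uint64_t flags = (i == n - 1) ? RTE_DMA_FLAG_SUBMIT : 0;
		rte_dmadev_copy(dev_id, vq_id, src[i], dst[i], len, flags);
	}
	/* no separate doorbell/perform call is needed in this model */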


>
> >
> > >
> > > >
> <snip>
> > > > > +
> > > > > +/**
> > > > > + * @warning
> > > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > > + *
> > > > > + * Returns the number of operations that failed to complete.
> > > > > + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> > > > > + *
> > > > > + * @param dev_id
> > > > > + *   The identifier of the device.
> > > > > + * @param vq_id
> > > > > + *   The identifier of virt queue.
> > > > (> + * @param nb_status
> > > > > + *   Indicates the size  of status array.
> > > > > + * @param[out] status
> > > > > + *   The error code of operations that failed to complete.
> > > > > + * @param[out] cookie
> > > > > + *   The last failed completed operation's cookie.
> > > > > + *
> > > > > + * @return
> > > > > + *   The number of operations that failed to complete.
> > > > > + *
> > > > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > > > + *       corresponding device supports the operation.
> > > > > + */
> > > > > +__rte_experimental
> > > > > +static inline uint16_t
> > > > > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> > > > > +                          const uint16_t nb_status, uint32_t *status,
> > > > > +                          dma_cookie_t *cookie)
> > > >
> > > > IMO, it is better to move cookie/ring_idx to position 3.
> > > > Why it would return any array of errors? since it called after
> > > > rte_dmadev_completed() has
> > > > has_error. Is it better to change
> > > >
> > > > rte_dmadev_error_status((uint16_t dev_id, uint16_t vq_id, dma_cookie_t
> > > > *cookie,  uint32_t *status)
> > > >
> > > > I also think, we may need to set status as bitmask and enumerate all
> > > > the combination of error codes
> > > > of all the driver and return string from driver existing rte_flow_error
> > > >
> > > > See
> > > > struct rte_flow_error {
> > > >         enum rte_flow_error_type type; /**< Cause field and error types. */
> > > >         const void *cause; /**< Object responsible for the error. */
> > > >         const char *message; /**< Human-readable error message. */
> > > > };
> > > >
> > >
> > > I think we need a multi-return value API here, as we may add operations in
> > > future which have non-error status values to return. The obvious case is
> > > DMA engines which support "compare" operations. In that case a successful
> > > compare (as in there were no DMA or HW errors) can return "equal" or
> > > "not-equal" as statuses. For general "copy" operations, the faster
> > > completion op can be used to just return successful values (and only call
> > > this status version on error), while apps using those compare ops or a
> > > mixture of copy and compare ops, would always use the slower one that
> > > returns status values for each and every op..
> > >
> > > The ioat APIs used 32-bit integer values for this status array so as to
> > > allow e.g. 16-bits for error code and 16-bits for future status values. For
> > > most operations there should be a fairly small set of things that can go
> > > wrong, i.e. bad source address, bad destination address or invalid length.
> > > Within that we may have a couple of specifics for why an address is bad,
> > > but even so I don't think we need to start having multiple bit
> > > combinations.
> >
> > OK. What is the purpose of the error status? Is it for the application to print, or
> > does the application need to take any action based on specific errors?
>
> It's largely for information purposes, but in the case of SVA/SVM errors
> could occur due to the memory not being pinned, i.e. a page fault, in some
> cases. If that happens, then it's up the app to either touch the memory and
> retry the copy, or to do a SW memcpy as a fallback.
>
> In other error cases, I think it's good to tell the application if it's
> passing around bad data, or data that is beyond the scope of hardware, e.g.
> a copy that is beyond what can be done in a single transaction for a HW
> instance. Given that there are always things that can go wrong, I think we
> need some error reporting mechanism.
>
> > If the former is the scope, then we need to define standard enum values
> > for the errors, right?
> > i.e. uint32_t *status needs to change to enum rte_dma_error or so.
> >
> Sure. Perhaps an error/status structure is an option, where we
> explicitly separate error info from status info.

Agree. Better to have a structure with fields like the following (a rough
sketch appears after the list):

1)  enum rte_dma_error_type
2)  memory to store an informative message on the finer aspects of the error,
like the address which caused the issue etc. (which will be driver-specific
information).
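
As a rough sketch (field names illustrative, mirroring rte_flow_error):

	struct rte_dma_error {
		enum rte_dma_error_type type; /* standardized error cause */
		const char *message; /* driver-specific detail, e.g. the
				      * faulting address */
	};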


>
> >
> >
> <snip to end>
>
> /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates
  2021-07-07  3:16   ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates fengchengwen
@ 2021-07-07  8:11     ` Bruce Richardson
  2021-07-07  8:14     ` Bruce Richardson
  2021-07-07 10:42     ` Jerin Jacob
  2 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-07  8:11 UTC (permalink / raw)
  To: fengchengwen; +Cc: dev, Jerin Jacob, Jerin Jacob, Morten Brørup

On Wed, Jul 07, 2021 at 11:16:44AM +0800, fengchengwen wrote:
> LGTM, thanks
> 
> And I'm preparing dmadev V2, which includes:
> a) Fix code review comments (e.g. multi-process support, doxygen, comments, typos)
> b) Flatten the device abstraction to two layers: dmadev <> vchan
> c) Public APIs use dev_id and vchan_id to locate one vchan
> d) Use the flags parameter instead of the fence API

Bit uncertain about this one still

> e) Rename rte_dmadev_perform to rte_dmadev_submit so it corresponds to the stats variable.
> 
> PS: Some code (lib/dmadev) will be rebased on this patchset
> 
This was not meant to be a separate patchset, but rather to try and keep us
all in sync on what was being looked at and tested. Please just pull in the
changes from this set (as many as you are happy with) into your V2 RFC to
simplify things. It's better to just have the one master RFC into which
changes are pulled.

Thanks,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates
  2021-07-07  3:16   ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates fengchengwen
  2021-07-07  8:11     ` Bruce Richardson
@ 2021-07-07  8:14     ` Bruce Richardson
  2021-07-07 10:42     ` Jerin Jacob
  2 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-07  8:14 UTC (permalink / raw)
  To: fengchengwen; +Cc: dev, Jerin Jacob, Jerin Jacob, Morten Brørup

On Wed, Jul 07, 2021 at 11:16:44AM +0800, fengchengwen wrote:
> LGTM, thanks
> 
> And I'm preparing dmadev V2, which includes: a) Fix code review comments (e.g.
> multi-process support, doxygen, comments, typos) b) Flatten the device
> abstraction to two layers: dmadev <> vchan c) Public APIs use dev_id and
> vchan_id to locate one vchan d) Use the flags parameter instead of the
> fence API e) Rename rte_dmadev_perform to rte_dmadev_submit so it
> corresponds to the stats variable.
> 
> PS: Some code (lib/dmadev) will be rebased on this patchset
> 
> 
As well as posting the RFC v2 here, could you also perhaps post it to a
github repo, so that Jerin, myself and others can send pull-requests with
suggested changes? For key areas of discussion, working through github
with patches sent via pull requests that you can take in directly will
probably be faster for getting us to a v3. The mail threads on list are
already getting very long and hard to follow.

Regards,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports
  2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports Bruce Richardson
@ 2021-07-07  8:26     ` David Marchand
  2021-07-07  8:36       ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: David Marchand @ 2021-07-07  8:26 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: dev, Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup

On Tue, Jul 6, 2021 at 10:29 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> Export the rte_dmadevices array and the allocate and release functions
> which are needed by PMDs.

rte_dmadevices[] might be an issue for inline accesses, but pmd
allocate/release should be internal (driver only).


-- 
David Marchand


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-07  8:08         ` Jerin Jacob
@ 2021-07-07  8:35           ` Bruce Richardson
  2021-07-07 10:34             ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-07  8:35 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Wed, Jul 07, 2021 at 01:38:58PM +0530, Jerin Jacob wrote:
> On Mon, Jul 5, 2021 at 10:46 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > On Mon, Jul 05, 2021 at 09:25:34PM +0530, Jerin Jacob wrote:
> > >
> > > On Mon, Jul 5, 2021 at 4:22 PM Bruce Richardson
> > > <bruce.richardson@intel.com> wrote:
> > > >
> > > > On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> > > > > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > > > >
> > > > > > This patch introduces 'dmadevice' which is a generic type of DMA
> > > > > > device.
> > <snip>
> > > >
> > > > +1 and the terminology with regards to queues and channels. With our ioat
> > > > hardware, each HW queue was called a channel for instance.
> > >
> > > Looks like <dmadev> <> <channel> can cover all the use cases; if the
> > > HW has more than one queue,
> > > it can be exposed as a separate dmadev.
> > >
> >
> > Fine for me.
> >
> > However, just to confirm that Morten's suggestion of using a
> > (device-specific void *) channel pointer rather than dev_id + channel_id
> > pair of parameters won't work for you? You can't store a pointer or dev
> > index in the channel struct in the driver?
> 
> Yes, that will work. To confirm, the suggestion is to use a void *
> object instead of channel_id;
> that will avoid one more indirection (index -> pointer).
> 

The proposal was to use it in place of "dev_id + channel_id", i.e.

copy(dev_id, ch_id, src, dst, len, flags) --> copy(ch_ptr, src, dst, len, flags)

Where the channel pointer implicitly indicates the device too. However, I
realise now that this would be something completely transparent to the
driver as it would all have to be implemented in the dmadev level, and
lead to lots of duplication of function pointers, etc. Therefore, let's
just go with the original scheme. :-(

> 
> >
> > >

<snip>

> > > Got it. In order to save space in the first CL for the fastpath (saving 8B
> > > for the pointer) and to avoid
> > > function overhead, can we use one bit of the op function's flags to
> > > enable the fence?
> > >
> >
> > The original ioat implementation did exactly that. However, I then
> > discovered that because a fence logically belongs between two operations,
> > does the fence flag on an operation mean "don't do any jobs after this
> > until this job has completed" or does it mean "don't start this job until
> > all previous jobs have completed". [Or theoretically does it mean both :-)]
> > Naturally, some hardware does it the former way (i.e. fence flag goes on
> > last op before fence), while other hardware the latter way (i.e. fence flag
> > goes on first op after the fence). Therefore, since fencing is about
> > ordering *between* two (sets of) jobs, I decided that it should do exactly
> > that and go between two jobs, so there is no ambiguity!
> >
> > However, I'm happy enough to switch to having a fence flag, but I think if
> > we do that, it should be put in the "first job after fence" case, because
> > it is always easier to modify a previously written job if we need to, than
> > to save the flag for a future one.
> >
> > Alternatively, if we keep the fence as a separate function, I'm happy
> > enough for it not to be on the same cacheline as the "hot" operations,
> > since fencing will always introduce a small penalty anyway.
> 
> Ack.
> You may consider two flags, FENCE_THEN_JOB and JOB_THEN_FENCE (if
> there is any use case for this, or it makes sense for your HW).
> 
> 
> For us, fence is a NOP as we have an implicit fence between each
> HW job descriptor.
> 

I actually still think that having a separate fence function in the "ops"
section is the best approach here. It's unambiguous as to whether it's
fence-before or fence-after, and if we have it in the ops, it doesn't use a
"fast-path" slot.

However, if we *really* want to use a flag instead, I don't see the value
in having two flags; it would be really confusing. Instead, if we do go
with a flag, I think "RTE_DMA_PRE_FENCE" should be the name, indicating
that the fence occurs before the job in question.
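
To make the semantics concrete, a rough usage sketch (the flag, function
names and signatures here are assumptions based on this discussion, not a
settled API):

    /* Hypothetical: the second copy must not start until the first has
     * completed; the fence flag goes on the first job *after* the fence. */
    rte_dmadev_copy(dev_id, vchan, src1, dst1, len1, 0);
    rte_dmadev_copy(dev_id, vchan, src2, dst2, len2, RTE_DMA_PRE_FENCE);
    rte_dmadev_submit(dev_id, vchan); /* one doorbell for the whole burst */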

> 
> >
> > > >
> > > > >
> > <snip>
> > > > > Since we have additional function call overhead in all the
> > > > > applications for this scheme, I would like to understand
> > > > > the use of doing this way vs enq does the doorbell implicitly from
> > > > > driver/application PoV?
> > > > >
> > > >
> > > > In our benchmarks it's just faster. When we tested it, the overhead of the
> > > > function calls was noticeably less than the cost of building up the
> > > > parameter array(s) for passing the jobs in as a burst. [We don't see this
> > > > cost with things like NIC I/O since DPDK tends to already have the mbuf
> > > > fully populated before the TX call anyway.]
> > >
> > > OK. I agree with stack population.
> > >
> > > My question was more on the enq doing an implicit doorbell update. Is a doorbell write
> > > costly in other HW compared to a function call? In our HW, it is just a write of
> > > the number of instructions written to a register.
> > >
> > > Also, we need to again access the internal PMD memory structure to find
> > > where to write etc if it is a separate function.
> > >
> >
> > The cost varies depending on a number of factors - even writing to a single
> > HW register can be very slow if that register is mapped as device
> > (uncacheable) memory, since (AFAIK) it will act as a full fence and wait
> 
> I don't know; at least in our case, writes are write-back, so the core does not need
> to wait (if there is no read operation).
> 
> > for the write to go all the way to hardware. For more modern HW, the cost
> > can be lighter. However, any cost of HW writes is going to be the same
> > whether its a separate function call or not.
> >
> > However, the main thing about the doorbell update is that it's a
> > once-per-burst thing, rather than a once-per-job. Therefore, even if you
> > have to re-read the struct memory (which is likely still somewhere in your
> > cores' cache), any extra small cost of doing so is to be amortized over the
> > cost of a whole burst of copies.
> 
> The Linux kernel has an xmit_more flag in the skb to address a similar thing,
> i.e. the enq job flags can have one more bit field to say whether to update the ring bell or not,
> rather than having yet another function overhead. IMO, it is the best of both worlds.
> 

It's just more conditionals and branches all through the code. Inside the
user application, the user has to check whether to set the flag or not (or
special-case the last transaction outside the loop), and within the driver,
there has to be a branch whether or not to call the doorbell function. The
code on both sides is far simpler and more readable if the doorbell
function is exactly that - a separate function.
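
For comparison, roughly what the two styles look like from the application
side (a sketch only; the function and flag names are placeholders, not a
settled API):

    uint16_t i;

    /* Separate doorbell function: enqueue a burst, then submit once. */
    for (i = 0; i < burst; i++)
            rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len[i], 0);
    rte_dmadev_submit(dev_id, vchan);

    /* Doorbell-in-flags: the loop must special-case the last job. */
    for (i = 0; i < burst; i++)
            rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len[i],
                            (i == burst - 1) ? RTE_DMA_FLAG_DOORBELL : 0);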

> 
> >
> > >
> > > >
> > > > >
> > <snip>
> > > > > > +
> > > > > > +/**
> > > > > > + * @warning
> > > > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > > > + *
> > > > > > + * Returns the number of operations that failed to complete.
> > > > > > + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> > > > > > + *
> > > > > > + * @param dev_id
> > > > > > + *   The identifier of the device.
> > > > > > + * @param vq_id
> > > > > > + *   The identifier of virt queue.
> > > > > > + * @param nb_status
> > > > > > + *   Indicates the size of status array.
> > > > > > + * @param[out] status
> > > > > > + *   The error code of operations that failed to complete.
> > > > > > + * @param[out] cookie
> > > > > > + *   The last failed completed operation's cookie.
> > > > > > + *
> > > > > > + * @return
> > > > > > + *   The number of operations that failed to complete.
> > > > > > + *
> > > > > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > > > > + *       corresponding device supports the operation.
> > > > > > + */
> > > > > > +__rte_experimental
> > > > > > +static inline uint16_t
> > > > > > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> > > > > > +			const uint16_t nb_status, uint32_t *status,
> > > > > > +			dma_cookie_t *cookie)
> > > > >
> > > > > IMO, it is better to move cookie/ring_idx to arg 3.  Why would it
> > > > > return an array of errors? Since it is called after
> > > > > rte_dmadev_completed() has has_error set. Is it better to change to
> > > > >
> > > > > rte_dmadev_error_status(uint16_t dev_id, uint16_t vq_id,
> > > > > dma_cookie_t *cookie, uint32_t *status)
> > > > >
> > > > > I also think, we may need to set status as bitmask and enumerate
> > > > > all the combination of error codes of all the driver and return
> > > > > string from driver existing rte_flow_error
> > > > >
> > > > > See struct rte_flow_error { enum rte_flow_error_type type; /**<
> > > > > Cause field and error types. */ const void *cause; /**< Object
> > > > > responsible for the error. */ const char *message; /**<
> > > > > Human-readable error message. */ };
> > > > >
> > > >
> > > > I think we need a multi-return value API here, as we may add
> > > > operations in future which have non-error status values to return.
> > > > The obvious case is DMA engines which support "compare" operations.
> > > > In that case a successful compare (as in there were no DMA or HW
> > > > errors) can return "equal" or "not-equal" as statuses. For general
> > > > "copy" operations, the faster completion op can be used to just
> > > > return successful values (and only call this status version on
> > > > error), while apps using those compare ops or a mixture of copy and
> > > > compare ops, would always use the slower one that returns status
> > > > values for each and every op.
> > > >
> > > > The ioat APIs used 32-bit integer values for this status array so
> > > > as to allow e.g. 16-bits for error code and 16-bits for future
> > > > status values. For most operations there should be a fairly small
> > > > set of things that can go wrong, i.e. bad source address, bad
> > > > destination address or invalid length.  Within that we may have a
> > > > couple of specifics for why an address is bad, but even so I don't
> > > > think we need to start having multiple bit combinations.
> > >
> > > OK. What is the purpose of the error status? Is it for the application
> > > to print, or does the application need to take any action based on
> > > specific errors?
> >
> > It's largely for information purposes, but in the case of SVA/SVM
> > errors could occur due to the memory not being pinned, i.e. a page
> > fault, in some cases. If that happens, then it's up to the app to either
> > touch the memory and retry the copy, or to do a SW memcpy as a
> > fallback.
> >
> > In other error cases, I think it's good to tell the application if it's
> > passing around bad data, or data that is beyond the scope of hardware,
> > e.g.  a copy that is beyond what can be done in a single transaction
> > for a HW instance. Given that there are always things that can go
> > wrong, I think we need some error reporting mechanism.
> >
> > > If the former is scope, then we need to define the standard enum
> > > value for the error right?  ie. uint32_t *status needs to change to
> > > enum rte_dma_error or so.
> > >
> > Sure. Perhaps an error/status structure is an option, where we
> > explicitly call out error info from status info.
> 
> Agree. Better to have a structure with fields like,
>
> 1)  enum rte_dma_error_type 2)  memory to store an informative message on
> the finer aspects of the error, like the address that caused the issue
> etc. (which will be driver-specific information).
> 
The only issue I have with that is that once we have driver-specific
information it is of little use to the application, since it can't know
anything about it except maybe log it.  I'd much rather have a set of error
codes telling the user that "source address is wrong", "dest address is
wrong", and a generic "an address is wrong" in case the driver/HW cannot
distinguish the source of the error. Can we see how far we get with just
error codes before we start passing string messages around, with all the
memory management headaches that implies?
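
As a sketch of how an application might consume such plain error codes
(the enum values are illustrative, rte_dmadev_completed_fails() is taken
from the RFC under discussion, and the helper functions are hypothetical):

    #define BURST 32  /* illustrative burst size */

    uint32_t status[BURST];
    dma_cookie_t cookie;
    uint16_t i, n;

    n = rte_dmadev_completed_fails(dev_id, vq_id, BURST, status, &cookie);
    for (i = 0; i < n; i++) {
            switch (status[i]) {
            case RTE_DMA_ERR_SRC_ADDR:
            case RTE_DMA_ERR_DST_ADDR:
                    /* e.g. a page fault under SVA: touch the memory and
                     * retry, or fall back to a SW memcpy. */
                    sw_fallback_copy(i);    /* hypothetical helper */
                    break;
            default:
                    log_failed_op(i);       /* hypothetical helper */
                    break;
            }
    }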

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports
  2021-07-07  8:26     ` David Marchand
@ 2021-07-07  8:36       ` Bruce Richardson
  2021-07-07  8:57         ` David Marchand
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-07  8:36 UTC (permalink / raw)
  To: David Marchand
  Cc: dev, Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup

On Wed, Jul 07, 2021 at 10:26:36AM +0200, David Marchand wrote:
> On Tue, Jul 6, 2021 at 10:29 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > Export the rte_dmadevices array and the allocate and release functions
> > which are needed by PMDs.
> 
> rte_dmadevices[] might be an issue for inline accesses, but pmd
> allocate/release should be internal (driver only).
> 
So if I understand correctly, they still need to be in the version.map
file, but with "internal" versioning rather than "experimental", right?

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports
  2021-07-07  8:36       ` Bruce Richardson
@ 2021-07-07  8:57         ` David Marchand
  0 siblings, 0 replies; 339+ messages in thread
From: David Marchand @ 2021-07-07  8:57 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: dev, Chengwen Feng, Jerin Jacob, Jerin Jacob, Morten Brørup

On Wed, Jul 7, 2021 at 10:37 AM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Wed, Jul 07, 2021 at 10:26:36AM +0200, David Marchand wrote:
> > On Tue, Jul 6, 2021 at 10:29 PM Bruce Richardson
> > <bruce.richardson@intel.com> wrote:
> > >
> > > Export the rte_dmadevices array and the allocate and release functions
> > > which are needed by PMDs.
> >
> > rte_dmadevices[] might be an issue for inline accesses, but pmd
> > allocate/release should be internal (driver only).
> >
> So if I understand correctly, they still need to be in the version.map

drivers still need them exported, so yes they must be in version.map
with INTERNAL version.

> file, but with "internal" versioning rather than "experimental", right?

But I would also move them to a separate header for drivers, like
ethdev_driver.h.
And the __rte_internal tag will be needed on their declarations.

https://git.dpdk.org/dpdk/tree/lib/ethdev/ethdev_driver.h#n1005
https://git.dpdk.org/dpdk/tree/lib/ethdev/version.map#n257
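
For reference, roughly what that looks like, following the ethdev pattern
(a sketch; the exact dmadev function names are assumptions):

    /* In a driver-only header, e.g. a hypothetical rte_dmadev_driver.h: */
    __rte_internal
    struct rte_dmadev *rte_dmadev_pmd_allocate(const char *name);

    __rte_internal
    int rte_dmadev_pmd_release(struct rte_dmadev *dev);

    /* And in lib/dmadev/version.map: */
    INTERNAL {
            global:

            rte_dmadev_pmd_allocate;
            rte_dmadev_pmd_release;
    };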


-- 
David Marchand


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-07  8:35           ` Bruce Richardson
@ 2021-07-07 10:34             ` Jerin Jacob
  2021-07-07 11:01               ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-07 10:34 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Wed, Jul 7, 2021 at 2:05 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Wed, Jul 07, 2021 at 01:38:58PM +0530, Jerin Jacob wrote:
> > On Mon, Jul 5, 2021 at 10:46 PM Bruce Richardson
> > <bruce.richardson@intel.com> wrote:
> > >
> > > On Mon, Jul 05, 2021 at 09:25:34PM +0530, Jerin Jacob wrote:
> > > >
> > > > On Mon, Jul 5, 2021 at 4:22 PM Bruce Richardson
> > > > <bruce.richardson@intel.com> wrote:
> > > > >
> > > > > On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> > > > > > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > > > > >
> > > > > > > This patch introduces 'dmadevice' which is a generic type of DMA
> > > > > > > device.
> > > <snip>
> > > > >
> > > > > +1 and the terminology with regards to queues and channels. With our ioat
> > > > > hardware, each HW queue was called a channel for instance.
> > > >
> > > > Looks like <dmadev> <> <channel> can cover all the use cases, if the
> > > > HW has more than
> > > > 1 queues it can be exposed as separate dmadev dev.
> > > >
> > >
> > > Fine for me.
> > >
> > > However, just to confirm that Morten's suggestion of using a
> > > (device-specific void *) channel pointer rather than dev_id + channel_id
> > > pair of parameters won't work for you? You can't store a pointer or dev
> > > index in the channel struct in the driver?
> >
> > Yes. That will work. To confirm, the suggestion is to use, void *
> > object instead of channel_id,
> > That will avoid one more indirection.(index -> pointer)
> >
>
> The proposal was to use it in place of "dev_id + channel_id", i.e.
>
> copy(dev_id, ch_id, src, dst, len, flags) --> copy(ch_ptr, src, dst, len, flags)
>
> Where the channel pointer implicitly indicates the device too. However, I
> realise now that this would be something completely transparent to the
> driver as it would all have to be implemented in the dmadev level, and
> lead to lots of duplication of function pointers, etc. Therefore, let's
> just go with original scheme. :-(

Yes. Just go with the original scheme.

>
> >
> > >
> > > >
>
> <snip>
>
> > > > Got it. In order to save space if first CL size for fastpath(Saving 8B
> > > > for the pointer) and to avoid
> > > > function overhead, Can we use one bit of flags of op function to
> > > > enable the fence?
> > > >
> > >
> > > The original ioat implementation did exactly that. However, I then
> > > discovered that because a fence logically belongs between two operations,
> > > does the fence flag on an operation mean "don't do any jobs after this
> > > until this job has completed" or does it mean "don't start this job until
> > > all previous jobs have completed". [Or theoretically does it mean both :-)]
> > > Naturally, some hardware does it the former way (i.e. fence flag goes on
> > > last op before fence), while other hardware the latter way (i.e. fence flag
> > > goes on first op after the fence). Therefore, since fencing is about
> > > ordering *between* two (sets of) jobs, I decided that it should do exactly
> > > that and go between two jobs, so there is no ambiguity!
> > >
> > > However, I'm happy enough to switch to having a fence flag, but I think if
> > > we do that, it should be put in the "first job after fence" case, because
> > > it is always easier to modify a previously written job if we need to, than
> > > to save the flag for a future one.
> > >
> > > Alternatively, if we keep the fence as a separate function, I'm happy
> > > enough for it not to be on the same cacheline as the "hot" operations,
> > > since fencing will always introduce a small penalty anyway.
> >
> > Ack.
> > You may consider two flags, FENCE_THEN_JOB and JOB_THEN_FENCE( If
> > there any use case for this or it makes sense for your HW)
> >
> >
> > For us, Fence is NOP for us as we have an implicit fence between each
> > HW job descriptor.
> >
>
> I actually still think that having a separate fence function in the "ops"
> > section is the best approach here. It's unambiguous as to whether it's
> fence-before or fence-after, and if we have it in the ops, it doesn't use a
> "fast-path" slot.
>
> However, if we *really* want to use a flag instead, I don't see the value
> in having two flags, it will be really confusing.  Instead, if we do go
> with a flag, I think "RTE_DMA_PRE_FENCE" should be the name, indicating
> that the fence occurs before the job in question.

IMO, we need to use flags, and the name can be RTE_DMA_PRE_FENCE,
due to the overhead of the driver implementation where the fence request
can be a NOP, and
to save first cache line occupancy.

>
> >
> > >
> > > > >
> > > > > >
> > > <snip>
> > > > > > Since we have additional function call overhead in all the
> > > > > > applications for this scheme, I would like to understand
> > > > > > the use of doing this way vs enq does the doorbell implicitly from
> > > > > > driver/application PoV?
> > > > > >
> > > > >
> > > > > In our benchmarks it's just faster. When we tested it, the overhead of the
> > > > > > function calls was noticeably less than the cost of building up the
> > > > > parameter array(s) for passing the jobs in as a burst. [We don't see this
> > > > > cost with things like NIC I/O since DPDK tends to already have the mbuf
> > > > > fully populated before the TX call anyway.]
> > > >
> > > > OK. I agree with stack population.
> > > >
> > > > My question was more on doing implicit doorbell update enq. Is doorbell write
> > > > costly in other HW compare to a function call? In our HW, it is just write of
> > > > the number of instructions written in a register.
> > > >
> > > > Also, we need to again access the internal PMD memory structure to find
> > > > where to write etc if it is a separate function.
> > > >
> > >
> > > The cost varies depending on a number of factors - even writing to a single
> > > HW register can be very slow if that register is mapped as device
> > > (uncacheable) memory, since (AFAIK) it will act as a full fence and wait
> >
> > I don't know, At least in our case, writes are write-back. so core does not need
> > to wait.(If there is no read operation).
> >
> > > for the write to go all the way to hardware. For more modern HW, the cost
> > > can be lighter. However, any cost of HW writes is going to be the same
> > > whether its a separate function call or not.
> > >
> > > However, the main thing about the doorbell update is that it's a
> > > once-per-burst thing, rather than a once-per-job. Therefore, even if you
> > > have to re-read the struct memory (which is likely still somewhere in your
> > > cores' cache), any extra small cost of doing so is to be amortized over the
> > > cost of a whole burst of copies.
> >
> > Linux kernel has xmit_more flag in skb to address similar thing.
> > i.e enq job flag can have one more bit field to say update ring bell or not?
> > Rather than having yet another function overhead. IMO, it is the best of both worlds.
> >
>
> It's just more conditionals and branches all through the code. Inside the
> user application, the user has to check whether to set the flag or not (or
> special-case the last transaction outside the loop), and within the driver,
> there has to be a branch whether or not to call the doorbell function. The
> code on both sides is far simpler and more readable if the doorbell
> function is exactly that - a separate function.

I disagree. The reason is:

We will have two classes of applications:

a) those that issue a DMA copy request as and when they have data (I think
this is the prime use case); for those,
I think it is considerable overhead to have two function invocations
per transfer, i.e.
rte_dma_copy() and rte_dma_perform().

b) those that issue a DMA copy when the data reaches a logical state, like
copying an IP frame from Ethernet packets or so;
in that case, the application will have logic to detect when to
perform it, so on the last
rte_dma_copy() the flag can be updated to fire the doorbell.

IMO, we are comparing a branch (the flag is already in a register) against
a set of instructions for:
1) function pointer overhead
2) needing to access the channel context again in another function.

IMO, a single branch is the most optimal from a performance PoV.
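
As a rough sketch of case b) (the flag name here is hypothetical, not a
proposed spelling):

    uint16_t i;

    /* Gather the segments of an IP frame; ring the doorbell only on the
     * enqueue that completes the logical unit. */
    for (i = 0; i < nb_segs; i++) {
            uint64_t flags = (i == nb_segs - 1) ? RTE_DMA_FLAG_DOORBELL : 0;
            rte_dma_copy(dev_id, ch_id, seg_src[i], seg_dst[i],
                         seg_len[i], flags);
    }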


>
> >
> > >
> > > >
> > > > >
> > > > > >
> > > <snip>
> > > > > > > +
> > > > > > > +/**
> > > > > > > + * @warning
> > > > > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > > > > + *
> > > > > > > + * Returns the number of operations that failed to complete.
> > > > > > > + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> > > > > > > + *
> > > > > > > + * @param dev_id
> > > > > > > + *   The identifier of the device.
> > > > > > > + * @param vq_id
> > > > > > > + *   The identifier of virt queue.
> > > > > > > + * @param nb_status
> > > > > > > + *   Indicates the size of status array.
> > > > > > > + * @param[out] status
> > > > > > > + *   The error code of operations that failed to complete.
> > > > > > > + * @param[out] cookie
> > > > > > > + *   The last failed completed operation's cookie.
> > > > > > > + *
> > > > > > > + * @return
> > > > > > > + *   The number of operations that failed to complete.
> > > > > > > + *
> > > > > > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > > > > > + *       corresponding device supports the operation.
> > > > > > > + */
> > > > > > > +__rte_experimental
> > > > > > > +static inline uint16_t
> > > > > > > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> > > > > > > +			const uint16_t nb_status, uint32_t *status,
> > > > > > > +			dma_cookie_t *cookie)
> > > > > >
> > > > > > IMO, it is better to move cookie/ring_idx at 3.  Why it would
> > > > > > return any array of errors? since it called after
> > > > > > rte_dmadev_completed() has has_error. Is it better to change
> > > > > >
> > > > > > rte_dmadev_error_status((uint16_t dev_id, uint16_t vq_id,
> > > > > > dma_cookie_t *cookie,  uint32_t *status)
> > > > > >
> > > > > > I also think, we may need to set status as bitmask and enumerate
> > > > > > all the combination of error codes of all the driver and return
> > > > > > string from driver existing rte_flow_error
> > > > > >
> > > > > > See struct rte_flow_error { enum rte_flow_error_type type; /**<
> > > > > > Cause field and error types. */ const void *cause; /**< Object
> > > > > > responsible for the error. */ const char *message; /**<
> > > > > > Human-readable error message. */ };
> > > > > >
> > > > >
> > > > > I think we need a multi-return value API here, as we may add
> > > > > operations in future which have non-error status values to return.
> > > > > The obvious case is DMA engines which support "compare" operations.
> > > > > In that case a successful compare (as in there were no DMA or HW
> > > > > errors) can return "equal" or "not-equal" as statuses. For general
> > > > > "copy" operations, the faster completion op can be used to just
> > > > > return successful values (and only call this status version on
> > > > > error), while apps using those compare ops or a mixture of copy and
> > > > > compare ops, would always use the slower one that returns status
> > > > > values for each and every op..
> > > > >
> > > > > The ioat APIs used 32-bit integer values for this status array so
> > > > > as to allow e.g. 16-bits for error code and 16-bits for future
> > > > > status values. For most operations there should be a fairly small
> > > > > set of things that can go wrong, i.e. bad source address, bad
> > > > > destination address or invalid length.  Within that we may have a
> > > > > couple of specifics for why an address is bad, but even so I don't
> > > > > think we need to start having multiple bit combinations.
> > > >
> > > > OK. What is the purpose of errors status? Is it for application
> > > > printing it or Does the application need to take any action based on
> > > > specific error requests?
> > >
> > > It's largely for information purposes, but in the case of SVA/SVM
> > > errors could occur due to the memory not being pinned, i.e. a page
> > > fault, in some cases. If that happens, then it's up to the app to either
> > > touch the memory and retry the copy, or to do a SW memcpy as a
> > > fallback.
> > >
> > > In other error cases, I think it's good to tell the application if it's
> > > passing around bad data, or data that is beyond the scope of hardware,
> > > e.g.  a copy that is beyond what can be done in a single transaction
> > > for a HW instance. Given that there are always things that can go
> > > wrong, I think we need some error reporting mechanism.
> > >
> > > > If the former is scope, then we need to define the standard enum
> > > > value for the error right?  ie. uint32_t *status needs to change to
> > > > enum rte_dma_error or so.
> > > >
> > > Sure. Perhaps an error/status structure is an option, where we
> > > explicitly call out error info from status info.
> >
> > Agree. Better to have a structure with fields like,
> >
> > 1)  enum rte_dma_error_type 2)  memory to store an informative message on
> > the finer aspects of the error, like the address that caused the issue
> > etc. (which will be driver-specific information).
> >
> The only issue I have with that is that once we have driver specific
> information it is of little use to the application, since it can't know
> anything about it except maybe log it.  I'd much rather have a set of error
> codes telling user that "source address is wrong", "dest address is wrong",
> and a generic "an address is wrong" in case driver/HW cannot distinguish
> source of error. Can we see how far we get with just error codes before we
> start into passing string messages around and all the memory management
> headaches that implies.

Works for me. It should be "enum rte_dma_error_type" then, with standard
error types, which is missing from the spec now.

>
> /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates
  2021-07-07  3:16   ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates fengchengwen
  2021-07-07  8:11     ` Bruce Richardson
  2021-07-07  8:14     ` Bruce Richardson
@ 2021-07-07 10:42     ` Jerin Jacob
  2 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-07 10:42 UTC (permalink / raw)
  To: fengchengwen; +Cc: Bruce Richardson, dpdk-dev, Jerin Jacob, Morten Brørup

On Wed, Jul 7, 2021 at 8:46 AM fengchengwen <fengchengwen@huawei.com> wrote:
>
> LGTM, thanks
>
> And I'm preparing dmadev V2, which includes:
> a) Fix code review comments (e.g. multi-process support, doxygen, comments, typos)
> b) Flatten the device abstraction to two layers: dmadev <> vchan

I think we should not have "virtual" in the API specification; it can be
virtual or real based on the actual HW/SW/driver implementation.
I think just "chan" is enough.

> c) Public APIs use dev_id and vchan_id to locate one vchan
> d) Use the flags parameter instead of the fence API
> e) Rename rte_dmadev_perform to rte_dmadev_submit so it corresponds to the stats variable.
>
> PS: Some code (lib/dmadev) will be rebased on this patchset
>
>
> On 2021/7/7 4:28, Bruce Richardson wrote:
> > This patchset contains a series of changes to dmadev based on work being done to
> > port over our drivers to test this new infrastructure. Some of these are bug
> > fixes to enable compilation e.g. missing exports or meson.build files, while
> > others are suggested changes to enhance the API. All these patches are to be
> > applied on top of [1] as they are mostly suggested changes to that RFC i.e.
> > patches to the patch!
> >
> > The final patch includes some basic sanity tests for copy operations that we
> > have ported over from the ioat self-tests to use the dmadev APIs. The basic
> > dataplane part of those tests is probably ok for now, but the initialization of
> > queues in that test code may need some enhancement. Feedback welcome.
> >
> > A tree with all these patches applied can be got at [2] if anyone wants to use
> > that as a basis for working on drivers, or for other discussion.
> >
> > [1] http://patches.dpdk.org/project/dpdk/patch/1625231891-2963-1-git-send-email-fengchengwen@huawei.com/
> > [2] https://github.com/bruce-richardson/dpdk/tree/dmadev-rfcs
> >
> > Bruce Richardson (9):
> >   dmadev: add missing exports
> >   dmadev: change virtual addresses to IOVA
> >   dmadev: add dump function
> >   dmadev: remove xstats functions
> >   dmadev: drop cookie typedef
> >   dmadev: allow NULL parameters to completed ops call
> >   dmadev: stats structure updates
> >   drivers: add dma driver category
> >   app/test: add basic dmadev unit test
> >
> >  app/test/meson.build         |   2 +
> >  app/test/test_dmadev.c       | 320 +++++++++++++++++++++++++++++++++++
> >  drivers/dma/meson.build      |  11 ++
> >  drivers/meson.build          |   1 +
> >  lib/dmadev/rte_dmadev.c      |  66 ++------
> >  lib/dmadev/rte_dmadev.h      | 204 +++++++---------------
> >  lib/dmadev/rte_dmadev_core.h |  16 +-
> >  lib/dmadev/rte_dmadev_pmd.h  |  24 +--
> >  lib/dmadev/version.map       |   7 +-
> >  9 files changed, 425 insertions(+), 226 deletions(-)
> >  create mode 100644 app/test/test_dmadev.c
> >  create mode 100644 drivers/dma/meson.build
> >
> > --
> > 2.30.2
> >
> >
> > .
> >
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-07 10:34             ` Jerin Jacob
@ 2021-07-07 11:01               ` Bruce Richardson
  2021-07-08  3:11                 ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-07 11:01 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Wed, Jul 07, 2021 at 04:04:16PM +0530, Jerin Jacob wrote:
> On Wed, Jul 7, 2021 at 2:05 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > On Wed, Jul 07, 2021 at 01:38:58PM +0530, Jerin Jacob wrote:
> > > On Mon, Jul 5, 2021 at 10:46 PM Bruce Richardson
> > > <bruce.richardson@intel.com> wrote:
> > > >
> > > > On Mon, Jul 05, 2021 at 09:25:34PM +0530, Jerin Jacob wrote:
> > > > >
> > > > > On Mon, Jul 5, 2021 at 4:22 PM Bruce Richardson
> > > > > <bruce.richardson@intel.com> wrote:
> > > > > >
> > > > > > On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
> > > > > > > On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > > > > > >
> > > > > > > > This patch introduces 'dmadevice' which is a generic type of DMA
> > > > > > > > device.
> > > > <snip>
> > > > > >
> > > > > > +1 and the terminology with regards to queues and channels. With our ioat
> > > > > > hardware, each HW queue was called a channel for instance.
> > > > >
> > > > > Looks like <dmadev> <> <channel> can cover all the use cases, if the
> > > > > HW has more than
> > > > > 1 queues it can be exposed as separate dmadev dev.
> > > > >
> > > >
> > > > Fine for me.
> > > >
> > > > However, just to confirm that Morten's suggestion of using a
> > > > (device-specific void *) channel pointer rather than dev_id + channel_id
> > > > pair of parameters won't work for you? You can't store a pointer or dev
> > > > index in the channel struct in the driver?
> > >
> > > Yes. That will work. To confirm, the suggestion is to use, void *
> > > object instead of channel_id,
> > > That will avoid one more indirection.(index -> pointer)
> > >
> >
> > The proposal was to use it in place of "dev_id + channel_id", i.e.
> >
> > copy(dev_id, ch_id, src, dst, len, flags) --> copy(ch_ptr, src, dst, len, flags)
> >
> > Where the channel pointer implicitly indicates the device too. However, I
> > realise now that this would be something completely transparent to the
> > driver as it would all have to be implemented in the dmadev level, and
> > lead to lots of duplication of function pointers, etc. Therefore, let's
> > just go with original scheme. :-(
> 
> Yes. Just go with the original scheme.
>
+1

> >
> > >
> > > >
> > > > >
> >
> > <snip>
> >
> > > > > Got it. In order to save space if first CL size for fastpath(Saving 8B
> > > > > for the pointer) and to avoid
> > > > > function overhead, Can we use one bit of flags of op function to
> > > > > enable the fence?
> > > > >
> > > >
> > > > The original ioat implementation did exactly that. However, I then
> > > > discovered that because a fence logically belongs between two operations,
> > > > does the fence flag on an operation mean "don't do any jobs after this
> > > > until this job has completed" or does it mean "don't start this job until
> > > > all previous jobs have completed". [Or theoretically does it mean both :-)]
> > > > Naturally, some hardware does it the former way (i.e. fence flag goes on
> > > > last op before fence), while other hardware the latter way (i.e. fence flag
> > > > goes on first op after the fence). Therefore, since fencing is about
> > > > ordering *between* two (sets of) jobs, I decided that it should do exactly
> > > > that and go between two jobs, so there is no ambiguity!
> > > >
> > > > However, I'm happy enough to switch to having a fence flag, but I think if
> > > > we do that, it should be put in the "first job after fence" case, because
> > > > it is always easier to modify a previously written job if we need to, than
> > > > to save the flag for a future one.
> > > >
> > > > Alternatively, if we keep the fence as a separate function, I'm happy
> > > > enough for it not to be on the same cacheline as the "hot" operations,
> > > > since fencing will always introduce a small penalty anyway.
> > >
> > > Ack.
> > > You may consider two flags, FENCE_THEN_JOB and JOB_THEN_FENCE( If
> > > there any use case for this or it makes sense for your HW)
> > >
> > >
> > > For us, Fence is NOP for us as we have an implicit fence between each
> > > HW job descriptor.
> > >
> >
> > I actually still think that having a separate fence function in the "ops"
> > section is the best approach here. It's unambiguous as to whether it's
> > fence-before or fence-after, and if we have it in the ops, it doesn't use a
> > "fast-path" slot.
> >
> > However, if we *really* want to use a flag instead, I don't see the value
> > in having two flags, it will be really confusing.  Instead, if we do go
> > with a flag, I think "RTE_DMA_PRE_FENCE" should be the name, indicating
> > that the fence occurs before the job in question.
> 
> IMO, We need to use flags and the name can be RTE_DMA_PRE_FENCE
> due to overhead of the driver implementation where the fence request
> can be NOP and
> to save the first cache line occupancy.
> 
> >
> > >
> > > >
> > > > > >
> > > > > > >
> > > > <snip>
> > > > > > > Since we have additional function call overhead in all the
> > > > > > > applications for this scheme, I would like to understand
> > > > > > > the use of doing this way vs enq does the doorbell implicitly from
> > > > > > > driver/application PoV?
> > > > > > >
> > > > > >
> > > > > > In our benchmarks it's just faster. When we tested it, the overhead of the
> > > > > > function calls was noticeably less than the cost of building up the
> > > > > > parameter array(s) for passing the jobs in as a burst. [We don't see this
> > > > > > cost with things like NIC I/O since DPDK tends to already have the mbuf
> > > > > > fully populated before the TX call anyway.]
> > > > >
> > > > > OK. I agree with stack population.
> > > > >
> > > > > My question was more on doing implicit doorbell update enq. Is doorbell write
> > > > > costly in other HW compare to a function call? In our HW, it is just write of
> > > > > the number of instructions written in a register.
> > > > >
> > > > > Also, we need to again access the internal PMD memory structure to find
> > > > > where to write etc if it is a separate function.
> > > > >
> > > >
> > > > The cost varies depending on a number of factors - even writing to a single
> > > > HW register can be very slow if that register is mapped as device
> > > > (uncacheable) memory, since (AFAIK) it will act as a full fence and wait
> > >
> > > I don't know, At least in our case, writes are write-back. so core does not need
> > > to wait.(If there is no read operation).
> > >
> > > > for the write to go all the way to hardware. For more modern HW, the cost
> > > > can be lighter. However, any cost of HW writes is going to be the same
> > > > whether its a separate function call or not.
> > > >
> > > > However, the main thing about the doorbell update is that it's a
> > > > once-per-burst thing, rather than a once-per-job. Therefore, even if you
> > > > have to re-read the struct memory (which is likely still somewhere in your
> > > > cores' cache), any extra small cost of doing so is to be amortized over the
> > > > cost of a whole burst of copies.
> > >
> > > Linux kernel has xmit_more flag in skb to address similar thing.
> > > i.e enq job flag can have one more bit field to say update ring bell or not?
> > > Rather than having yet another function overhead. IMO, it is the best of both worlds.
> > >
> >
> > It's just more conditionals and branches all through the code. Inside the
> > user application, the user has to check whether to set the flag or not (or
> > special-case the last transaction outside the loop), and within the driver,
> > there has to be a branch whether or not to call the doorbell function. The
> > code on both sides is far simpler and more readable if the doorbell
> > function is exactly that - a separate function.
> 
> I disagree. The reason is:
> 
> We will have two classes of applications
> 
> a) do dma copy request as and when it has data(I think, this is the
> prime use case), for those,
> I think, it is considerable overhead to have two function invocation
> per transfer i.e
> rte_dma_copy() and rte_dma_perform()
> 
> b) do dma copy when the data is reached to a logical state,  like copy
> IP frame from Ethernet packets or so,
> In that case, the application will have  a LOGIC to detect when to
> perform it so on the end of
> that rte_dma_copy() flag can be updated to fire the doorbell.
> 
> IMO, We are comparing against a branch(flag is already in register) vs
> a set of instructions for
> 1) function pointer overhead
> 2) Need to use the channel context again back in another function.
> 
> IMO, a single branch is most optimal from performance PoV.
> 
Ok, let's try it and see how it goes.

> 
> >
> > >
> > > >
> > > > >
> > > > > >
> > > > > > >
> > > > <snip>
> > > > > > > > +
> > > > > > > > +/**
> > > > > > > > + * @warning
> > > > > > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > > > > > + *
> > > > > > > > + * Returns the number of operations that failed to complete.
> > > > > > > > + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> > > > > > > > + *
> > > > > > > > + * @param dev_id
> > > > > > > > + *   The identifier of the device.
> > > > > > > > + * @param vq_id
> > > > > > > > + *   The identifier of virt queue.
> > > > > > > > + * @param nb_status
> > > > > > > > + *   Indicates the size of status array.
> > > > > > > > + * @param[out] status
> > > > > > > > + *   The error code of operations that failed to complete.
> > > > > > > > + * @param[out] cookie
> > > > > > > > + *   The last failed completed operation's cookie.
> > > > > > > > + *
> > > > > > > > + * @return
> > > > > > > > + *   The number of operations that failed to complete.
> > > > > > > > + *
> > > > > > > > + * NOTE: The caller must ensure that the input parameter is valid and the
> > > > > > > > + *       corresponding device supports the operation.
> > > > > > > > + */
> > > > > > > > +__rte_experimental
> > > > > > > > +static inline uint16_t
> > > > > > > > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
> > > > > > > > +			const uint16_t nb_status, uint32_t *status,
> > > > > > > > +			dma_cookie_t *cookie)
> > > > > > >
> > > > > > > IMO, it is better to move cookie/ring_idx at 3.  Why it would
> > > > > > > return any array of errors? since it called after
> > > > > > > rte_dmadev_completed() has has_error. Is it better to change
> > > > > > >
> > > > > > > rte_dmadev_error_status((uint16_t dev_id, uint16_t vq_id,
> > > > > > > dma_cookie_t *cookie,  uint32_t *status)
> > > > > > >
> > > > > > > I also think, we may need to set status as bitmask and enumerate
> > > > > > > all the combination of error codes of all the driver and return
> > > > > > > string from driver existing rte_flow_error
> > > > > > >
> > > > > > > See struct rte_flow_error { enum rte_flow_error_type type; /**<
> > > > > > > Cause field and error types. */ const void *cause; /**< Object
> > > > > > > responsible for the error. */ const char *message; /**<
> > > > > > > Human-readable error message. */ };
> > > > > > >
> > > > > >
> > > > > > I think we need a multi-return value API here, as we may add
> > > > > > operations in future which have non-error status values to return.
> > > > > > The obvious case is DMA engines which support "compare" operations.
> > > > > > In that case a successful compare (as in there were no DMA or HW
> > > > > > errors) can return "equal" or "not-equal" as statuses. For general
> > > > > > "copy" operations, the faster completion op can be used to just
> > > > > > return successful values (and only call this status version on
> > > > > > error), while apps using those compare ops or a mixture of copy and
> > > > > > compare ops, would always use the slower one that returns status
> > > > > > values for each and every op..
> > > > > >
> > > > > > The ioat APIs used 32-bit integer values for this status array so
> > > > > > as to allow e.g. 16-bits for error code and 16-bits for future
> > > > > > status values. For most operations there should be a fairly small
> > > > > > set of things that can go wrong, i.e. bad source address, bad
> > > > > > destination address or invalid length.  Within that we may have a
> > > > > > couple of specifics for why an address is bad, but even so I don't
> > > > > > think we need to start having multiple bit combinations.
> > > > >
> > > > > OK. What is the purpose of errors status? Is it for application
> > > > > printing it or Does the application need to take any action based on
> > > > > specific error requests?
> > > >
> > > > It's largely for information purposes, but in the case of SVA/SVM
> > > > errors could occur due to the memory not being pinned, i.e. a page
> > > > fault, in some cases. If that happens, then it's up to the app to either
> > > > touch the memory and retry the copy, or to do a SW memcpy as a
> > > > fallback.
> > > >
> > > > In other error cases, I think it's good to tell the application if it's
> > > > passing around bad data, or data that is beyond the scope of hardware,
> > > > e.g.  a copy that is beyond what can be done in a single transaction
> > > > for a HW instance. Given that there are always things that can go
> > > > wrong, I think we need some error reporting mechanism.
> > > >
> > > > > If the former is scope, then we need to define the standard enum
> > > > > value for the error right?  ie. uint32_t *status needs to change to
> > > > > enum rte_dma_error or so.
> > > > >
> > > > Sure. Perhaps an error/status structure is an option, where we
> > > > explicitly call out error info from status info.
> > >
> > > Agree. Better to have a structure with fields like,
> > >
> > > 1)  enum rte_dma_error_type 2)  memory to store an informative message on
> > > the finer aspects of the error, like the address that caused the issue
> > > etc. (which will be driver-specific information).
> > >
> > The only issue I have with that is that once we have driver specific
> > information it is of little use to the application, since it can't know
> > anything about it except maybe log it.  I'd much rather have a set of error
> > codes telling user that "source address is wrong", "dest address is wrong",
> > and a generic "an address is wrong" in case driver/HW cannot distinguish
> > source of error. Can we see how far we get with just error codes before we
> > start into passing string messages around and all the memory management
> > headaches that implies.
> 
> Works for me. It should be "enum rte_dma_error_type" then, with standard
> error types, which is missing from the spec now.
> 
+1

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-07 11:01               ` Bruce Richardson
@ 2021-07-08  3:11                 ` fengchengwen
  2021-07-08 18:35                   ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-08  3:11 UTC (permalink / raw)
  To: Bruce Richardson, Jerin Jacob
  Cc: Thomas Monjalon, Ferruh Yigit, Jerin Jacob, dpdk-dev,
	Morten Brørup, Nipun Gupta, Hemant Agrawal, Maxime Coquelin,
	Honnappa Nagarahalli, David Marchand, Satananda Burla,
	Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On 2021/7/7 19:01, Bruce Richardson wrote:
> On Wed, Jul 07, 2021 at 04:04:16PM +0530, Jerin Jacob wrote:
>> On Wed, Jul 7, 2021 at 2:05 PM Bruce Richardson
>> <bruce.richardson@intel.com> wrote:
>>>
>>> On Wed, Jul 07, 2021 at 01:38:58PM +0530, Jerin Jacob wrote:
>>>> On Mon, Jul 5, 2021 at 10:46 PM Bruce Richardson
>>>> <bruce.richardson@intel.com> wrote:
>>>>>
>>>>> On Mon, Jul 05, 2021 at 09:25:34PM +0530, Jerin Jacob wrote:
>>>>>>
>>>>>> On Mon, Jul 5, 2021 at 4:22 PM Bruce Richardson
>>>>>> <bruce.richardson@intel.com> wrote:
>>>>>>>
>>>>>>> On Sun, Jul 04, 2021 at 03:00:30PM +0530, Jerin Jacob wrote:
>>>>>>>> On Fri, Jul 2, 2021 at 6:51 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>>>>>>>>>
>>>>>>>>> This patch introduces 'dmadevice' which is a generic type of DMA
>>>>>>>>> device.
>>>>> <snip>
>>>>>>>
>>>>>>> +1 and the terminology with regards to queues and channels. With our ioat
>>>>>>> hardware, each HW queue was called a channel for instance.
>>>>>>
>>>>>> Looks like <dmadev> <> <channel> can cover all the use cases, if the
>>>>>> HW has more than
>>>>>> 1 queues it can be exposed as separate dmadev dev.
>>>>>>
>>>>>
>>>>> Fine for me.
>>>>>
>>>>> However, just to confirm that Morten's suggestion of using a
>>>>> (device-specific void *) channel pointer rather than dev_id + channel_id
>>>>> pair of parameters won't work for you? You can't store a pointer or dev
>>>>> index in the channel struct in the driver?
>>>>
>>>> Yes. That will work. To confirm, the suggestion is to use, void *
>>>> object instead of channel_id,
>>>> That will avoid one more indirection.(index -> pointer)
>>>>
>>>
>>> The proposal was to use it in place of "dev_id + channel_id", i.e.
>>>
>>> copy(dev_id, ch_id, src, dst, len, flags) --> copy(ch_ptr, src, dst, len, flags)
>>>
>>> Where the channel pointer implicitly indicates the device too. However, I
>>> realise now that this would be something completely transparent to the
>>> driver as it would all have to be implemented in the dmadev level, and
>>> lead to lots of duplication of function pointers, etc. Therefore, let's
>>> just go with original scheme. :-(
>>
>> Yes. Just go with the original scheme.
>>
> +1
> 
>>>
>>>>
>>>>>
>>>>>>
>>>
>>> <snip>
>>>
>>>>>> Got it. In order to save space if first CL size for fastpath(Saving 8B
>>>>>> for the pointer) and to avoid
>>>>>> function overhead, Can we use one bit of flags of op function to
>>>>>> enable the fence?
>>>>>>
>>>>>
>>>>> The original ioat implementation did exactly that. However, I then
>>>>> discovered that because a fence logically belongs between two operations,
>>>>> does the fence flag on an operation mean "don't do any jobs after this
>>>>> until this job has completed" or does it mean "don't start this job until
>>>>> all previous jobs have completed". [Or theoretically does it mean both :-)]
>>>>> Naturally, some hardware does it the former way (i.e. fence flag goes on
>>>>> last op before fence), while other hardware the latter way (i.e. fence flag
>>>>> goes on first op after the fence). Therefore, since fencing is about
>>>>> ordering *between* two (sets of) jobs, I decided that it should do exactly
>>>>> that and go between two jobs, so there is no ambiguity!
>>>>>
>>>>> However, I'm happy enough to switch to having a fence flag, but I think if
>>>>> we do that, it should be put in the "first job after fence" case, because
>>>>> it is always easier to modify a previously written job if we need to, than
>>>>> to save the flag for a future one.
>>>>>
>>>>> Alternatively, if we keep the fence as a separate function, I'm happy
>>>>> enough for it not to be on the same cacheline as the "hot" operations,
>>>>> since fencing will always introduce a small penalty anyway.
>>>>
>>>> Ack.
>>>> You may consider two flags, FENCE_THEN_JOB and JOB_THEN_FENCE( If
>>>> there any use case for this or it makes sense for your HW)
>>>>
>>>>
>>>> For us, Fence is NOP for us as we have an implicit fence between each
>>>> HW job descriptor.
>>>>
>>>
>>> I actually still think that having a separate fence function in the "ops"
>>> section is the best approach here. It's unambiguous as to whether it's
>>> fence-before or fence-after, and if we have it in the ops, it doesn't use a
>>> "fast-path" slot.
>>>
>>> However, if we *really* want to use a flag instead, I don't see the value
>>> in having two flags, it will be really confusing.  Instead, if we do go
>>> with a flag, I think "RTE_DMA_PRE_FENCE" should be the name, indicating
>>> that the fence occurs before the job in question.
>>
>> IMO, We need to use flags and the name can be RTE_DMA_PRE_FENCE
>> due to overhead of the driver implementation where the fence request
>> can be NOP and
>> to save the first cache line occupancy.
>>
>>>
>>>>
>>>>>
>>>>>>>
>>>>>>>>
>>>>> <snip>
>>>>>>>> Since we have additional function call overhead in all the
>>>>>>>> applications for this scheme, I would like to understand
>>>>>>>> the use of doing this way vs enq does the doorbell implicitly from
>>>>>>>> driver/application PoV?
>>>>>>>>
>>>>>>>
>>>>>>> In our benchmarks it's just faster. When we tested it, the overhead of the
>>>>>>> function calls was noticeably less than the cost of building up the
>>>>>>> parameter array(s) for passing the jobs in as a burst. [We don't see this
>>>>>>> cost with things like NIC I/O since DPDK tends to already have the mbuf
>>>>>>> fully populated before the TX call anyway.]
>>>>>>
>>>>>> OK. I agree with stack population.
>>>>>>
>>>>>> My question was more on doing implicit doorbell update enq. Is doorbell write
>>>>>> costly in other HW compare to a function call? In our HW, it is just write of
>>>>>> the number of instructions written in a register.
>>>>>>
>>>>>> Also, we need to again access the internal PMD memory structure to find
>>>>>> where to write etc if it is a separate function.
>>>>>>
>>>>>
>>>>> The cost varies depending on a number of factors - even writing to a single
>>>>> HW register can be very slow if that register is mapped as device
>>>>> (uncacheable) memory, since (AFAIK) it will act as a full fence and wait
>>>>
>>>> I don't know, At least in our case, writes are write-back. so core does not need
>>>> to wait.(If there is no read operation).
>>>>
>>>>> for the write to go all the way to hardware. For more modern HW, the cost
>>>>> can be lighter. However, any cost of HW writes is going to be the same
>>>>> whether its a separate function call or not.
>>>>>
>>>>> However, the main thing about the doorbell update is that it's a
>>>>> once-per-burst thing, rather than a once-per-job. Therefore, even if you
>>>>> have to re-read the struct memory (which is likely still somewhere in your
>>>>> cores' cache), any extra small cost of doing so is to be amortized over the
>>>>> cost of a whole burst of copies.
>>>>
>>>> Linux kernel has xmit_more flag in skb to address similar thing.
>>>> i.e enq job flag can have one more bit field to say update ring bell or not?
>>>> Rather than having yet another function overhead. IMO, it is the best of both worlds.
>>>>
>>>
>>> It's just more conditionals and branches all through the code. Inside the
>>> user application, the user has to check whether to set the flag or not (or
>>> special-case the last transaction outside the loop), and within the driver,
>>> there has to be a branch whether or not to call the doorbell function. The
>>> code on both sides is far simpler and more readable if the doorbell
>>> function is exactly that - a separate function.
>>
>> I disagree. The reason is:
>>
>> We will have two classes of applications:
>>
>> a) those that do a DMA copy request as and when they have data (I think this is
>> the prime use case); for those,
>> I think it is considerable overhead to have two function invocations
>> per transfer, i.e.
>> rte_dma_copy() and rte_dma_perform()
>>
>> b) those that do a DMA copy when the data has reached a logical state, like copying
>> an IP frame from Ethernet packets or so;
>> in that case, the application will have a LOGIC to detect when to
>> perform it, so at the end of
>> that, the rte_dma_copy() flag can be updated to fire the doorbell.
>>
>> IMO, we are comparing a branch (the flag is already in a register) against
>> a set of instructions for
>> 1) the function pointer overhead
>> 2) the need to use the channel context again in another function.
>>
>> IMO, a single branch is most optimal from a performance PoV.
>>
> Ok, let's try it and see how it goes.

Test results show:
1) The Kunpeng platform (ARMv8) benefits very little from the doorbell in flags
2) The Xeon E5-2690 v2 (x86) benefits from the separate function
3) Both platforms benefit from the doorbell in flags when burst < 5

There is a performance gain only for small bursts (<5). Given the extensive use of bursts
in DPDK applications, and that users are accustomed to the concept, I do not recommend
using the 'doorbell' in flags.
Also, users may be confused by the doorbell operations.

Kunpeng platform test result:
    [root@SZ tmp]# ./a1 1
    burst = 1
    perform_after_multiple_enqueue: burst:1 cost:0s.554422us
    doorbell_for_every_enqueue: burst:1 cost:0s.450927us
    last_enqueue_issue_doorbell: burst:1 cost:0s.450479us
    [root@SZ tmp]#
    [root@SZ tmp]# ./a1 2
    burst = 2
    perform_after_multiple_enqueue: burst:2 cost:0s.900884us
    doorbell_for_every_enqueue: burst:2 cost:0s.866732us
    last_enqueue_issue_doorbell: burst:2 cost:0s.732469us
    [root@SZ tmp]# ./a1 5
    burst = 5
    perform_after_multiple_enqueue: burst:5 cost:1s.732410us
    doorbell_for_every_enqueue: burst:5 cost:2s.115479us
    last_enqueue_issue_doorbell: burst:5 cost:1s.759349us
    [root@SZ tmp]# ./a1 10
    burst = 10
    perform_after_multiple_enqueue: burst:10 cost:3s.490716us
    doorbell_for_every_enqueue: burst:10 cost:4s.194691us
    last_enqueue_issue_doorbell: burst:10 cost:3s.331825us
    [root@SZ tmp]# ./a1 30
    burst = 30
    perform_after_multiple_enqueue: burst:30 cost:9s.61761us
    doorbell_for_every_enqueue: burst:30 cost:12s.517082us
    last_enqueue_issue_doorbell: burst:30 cost:9s.614802us

X86 platform test result:
    fengchengwen@SZ:~/tmp$ ./a1 1
    burst = 1
    perform_after_multiple_enqueue: burst:1 cost:0s.406331us
    doorbell_for_every_enqueue: burst:1 cost:0s.331109us
    last_enqueue_issue_doorbell: burst:1 cost:0s.381782us
    fengchengwen@SZ:~/tmp$ ./a1 2
    burst = 2
    perform_after_multiple_enqueue: burst:2 cost:0s.569024us
    doorbell_for_every_enqueue: burst:2 cost:0s.643449us
    last_enqueue_issue_doorbell: burst:2 cost:0s.486639us
    fengchengwen@SZ:~/tmp$ ./a1 5
    burst = 5
    perform_after_multiple_enqueue: burst:5 cost:1s.166384us
    doorbell_for_every_enqueue: burst:5 cost:1s.602369us
    last_enqueue_issue_doorbell: burst:5 cost:1s.209392us
    fengchengwen@SZ:~/tmp$ ./a1 10
    burst = 10
    perform_after_multiple_enqueue: burst:10 cost:2s.229901us
    doorbell_for_every_enqueue: burst:10 cost:3s.754802us
    last_enqueue_issue_doorbell: burst:10 cost:2s.328705us
    fengchengwen@SZ:~/tmp$
    fengchengwen@SZ:~/tmp$ ./a1 30
    burst = 30
    perform_after_multiple_enqueue: burst:30 cost:6s.132817us
    doorbell_for_every_enqueue: burst:30 cost:9s.944619us
    last_enqueue_issue_doorbell: burst:30 cost:7s.73551us


test-code:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>

struct dmadev;

unsigned int dev_reg[10240];
volatile unsigned int *ring;
volatile unsigned int *doorbell;

void init_global(void)
{
        ring = &dev_reg[100];
        doorbell = &dev_reg[10000];
}

#define rte_wmb() asm volatile("dmb oshst" : : : "memory")
//#define rte_wmb() asm volatile ("" : : : "memory")

typedef int (*enqueue_t)(struct dmadev *dev, int vchan, void *src, void *dst, int len, int flags);
typedef void (*perform_t)(struct dmadev *dev, int vchan);

struct dmadev {
        enqueue_t enqueue;
        perform_t perform;
        char rsv[512];
};

int hisi_dma_enqueue(struct dmadev *dev, int vchan, void *src, void *dst, int len, int flags)
{
        /* enqueue only: write the descriptor, no doorbell */
        *ring = 1;
        return 0;
}

int hisi_dma_enqueue_doorbell(struct dmadev *dev, int vchan, void *src, void *dst, int len, int flags)
{
        /* enqueue, then ring the doorbell if the flag requests it */
        *ring = 1;
        if (flags == 1) {
                rte_wmb();
                *doorbell = 1;
        }
        return 0;
}

void hisi_dma_perform(struct dmadev *dev, int vchan)
{
        rte_wmb();
        *doorbell = 1;
}

struct dmadev devlist[64];

void init_devlist(bool enq_doorbell)
{
        int i;
        for (i = 0; i < 64; i++) {
                devlist[i].enqueue = enq_doorbell ? hisi_dma_enqueue_doorbell : hisi_dma_enqueue;
                devlist[i].perform = hisi_dma_perform;
        }
}

static inline int dma_enqueue(int dev_id, int vchan, void *src, void *dst, int len, int flags)
{
        struct dmadev *dev = &devlist[dev_id];
        return dev->enqueue(dev, vchan, src, dst, len, flags);
}

static inline void dma_perform(int dev_id, int vchan)
{
        struct dmadev *dev = &devlist[dev_id];
        dev->perform(dev, vchan);
}

#define MAX_LOOPS       90000000

void test_for_perform_after_multiple_enqueue(int burst)
{
        struct timeval start, end, delta;
        unsigned int i, j;
        init_devlist(false);
        gettimeofday(&start, NULL);
        for (i = 0; i < MAX_LOOPS; i++) {
                for (j = 0; j < burst; j++)
                        (void)dma_enqueue(10, 0, NULL, NULL, 0, 0);
                dma_perform(10, 0);
        }
        gettimeofday(&end, NULL);
        timersub(&end, &start, &delta);
        printf("perform_after_multiple_enqueue: burst:%d cost:%us.%uus \n", burst, delta.tv_sec, delta.tv_usec);
}

void test_for_doorbell_for_every_enqueue(int burst)
{
        struct timeval start, end, delta;
        unsigned int i, j;
        init_devlist(true);
        gettimeofday(&start, NULL);
        for (i = 0; i < MAX_LOOPS; i++) {
                for (j = 0; j < burst; j++)
                        (void)dma_enqueue(10, 0, NULL, NULL, 0, 1);
        }
        gettimeofday(&end, NULL);
        timersub(&end, &start, &delta);
        printf("doorbell_for_every_enqueue: burst:%d cost:%us.%uus \n", burst, delta.tv_sec, delta.tv_usec);
}

void test_for_last_enqueue_issue_doorbell(int burst)
{
        struct timeval start, end, delta;
        unsigned int i, j;
        init_devlist(true);
        gettimeofday(&start, NULL);
        for (i = 0; i < MAX_LOOPS; i++) {
                for (j = 0; j < burst - 1; j++)
                        (void)dma_enqueue(10, 0, NULL, NULL, 0, 0);
                dma_enqueue(10, 0, NULL, NULL, 0, 1);
        }
        gettimeofday(&end, NULL);
        timersub(&end, &start, &delta);
        printf("last_enqueue_issue_doorbell: burst:%d cost:%us.%uus \n", burst, delta.tv_sec, delta.tv_usec);
}

int main(int argc, char *argv[])
{
        if (argc < 2) {
                printf("please input burst parameter!\n");
                return -1;
        }
        init_global();
        int burst = atol(argv[1]);
        printf("burst = %d \n", burst);
        test_for_perform_after_multiple_enqueue(burst);
        test_for_doorbell_for_every_enqueue(burst);
        test_for_last_enqueue_issue_doorbell(burst);
        return 0;
}

> 
>>
>>>
>>>>
>>>>>
>>>>>>
>>>>>>>
>>>>>>>>
>>>>> <snip>
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * @warning
>>>>>>>>> + * @b EXPERIMENTAL: this API may change without prior notice.
>>>>>>>>> + *
>>>>>>>>> + * Returns the number of operations that failed to complete.
>>>>>>>>> + * NOTE: This API was used when rte_dmadev_completed has_error was set.
>>>>>>>>> + *
>>>>>>>>> + * @param dev_id
>>>>>>>>> + *   The identifier of the device.
>>>>>>>>> + * @param vq_id
>>>>>>>>> + *   The identifier of virt queue.
>>>>>>>>> + * @param nb_status
>>>>>>>>> + *   Indicates the size of status array.
>>>>>>>>> + * @param[out] status
>>>>>>>>> + *   The error code of operations that failed to complete.
>>>>>>>>> + * @param[out] cookie
>>>>>>>>> + *   The last failed completed operation's cookie.
>>>>>>>>> + *
>>>>>>>>> + * @return
>>>>>>>>> + *   The number of operations that failed to complete.
>>>>>>>>> + *
>>>>>>>>> + * NOTE: The caller must ensure that the input parameter is valid and the
>>>>>>>>> + *       corresponding device supports the operation.
>>>>>>>>> + */
>>>>>>>>> +__rte_experimental
>>>>>>>>> +static inline uint16_t
>>>>>>>>> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vq_id,
>>>>>>>>> +			const uint16_t nb_status, uint32_t *status,
>>>>>>>>> +			dma_cookie_t *cookie)
>>>>>>>>
>>>>>>>> IMO, it is better to move cookie/ring_idx to position 3. Why would it
>>>>>>>> return an array of errors, since it is called after
>>>>>>>> rte_dmadev_completed() has set has_error? Is it better to change it to
>>>>>>>>
>>>>>>>> rte_dmadev_error_status(uint16_t dev_id, uint16_t vq_id,
>>>>>>>> dma_cookie_t *cookie, uint32_t *status)
>>>>>>>>
>>>>>>>> I also think we may need to set status as a bitmask, enumerate
>>>>>>>> all the combinations of error codes across all the drivers, and return a
>>>>>>>> string from the driver like the existing rte_flow_error.
>>>>>>>>
>>>>>>>> See:
>>>>>>>> struct rte_flow_error {
>>>>>>>> 	enum rte_flow_error_type type; /**< Cause field and error types. */
>>>>>>>> 	const void *cause; /**< Object responsible for the error. */
>>>>>>>> 	const char *message; /**< Human-readable error message. */
>>>>>>>> };
>>>>>>>>
>>>>>>>
>>>>>>> I think we need a multi-return value API here, as we may add
>>>>>>> operations in future which have non-error status values to return.
>>>>>>> The obvious case is DMA engines which support "compare" operations.
>>>>>>> In that case a successful compare (as in there were no DMA or HW
>>>>>>> errors) can return "equal" or "not-equal" as statuses. For general
>>>>>>> "copy" operations, the faster completion op can be used to just
>>>>>>> return successful values (and only call this status version on
>>>>>>> error), while apps using those compare ops or a mixture of copy and
>>>>>>> compare ops, would always use the slower one that returns status
>>>>>>> values for each and every op.
>>>>>>>
>>>>>>> The ioat APIs used 32-bit integer values for this status array so
>>>>>>> as to allow e.g. 16-bits for error code and 16-bits for future
>>>>>>> status values. For most operations there should be a fairly small
>>>>>>> set of things that can go wrong, i.e. bad source address, bad
>>>>>>> destination address or invalid length.  Within that we may have a
>>>>>>> couple of specifics for why an address is bad, but even so I don't
>>>>>>> think we need to start having multiple bit combinations.
>>>>>>
>>>>>> OK. What is the purpose of the error status? Is it for the application to
>>>>>> print, or does the application need to take any action based on
>>>>>> specific errors?
>>>>>
>>>>> It's largely for information purposes, but in the case of SVA/SVM,
>>>>> errors could occur due to the memory not being pinned, i.e. a page
>>>>> fault, in some cases. If that happens, then it's up to the app to either
>>>>> touch the memory and retry the copy, or to do a SW memcpy as a
>>>>> fallback.
>>>>>
>>>>> In other error cases, I think it's good to tell the application if it's
>>>>> passing around bad data, or data that is beyond the scope of hardware,
>>>>> e.g.  a copy that is beyond what can be done in a single transaction
>>>>> for a HW instance. Given that there are always things that can go
>>>>> wrong, I think we need some error reporting mechanism.
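A minimal sketch of that retry-or-fallback handling, assuming a
hypothetical RTE_DMA_ERR_PAGE_FAULT status value (the name is illustrative,
not from the current spec):

#include <stdint.h>
#include <string.h>

enum { RTE_DMA_ERR_PAGE_FAULT = 1 };    /* assumed status value */

static void handle_failed_copy(uint32_t status, void *dst,
                               const void *src, size_t len)
{
        if (status == RTE_DMA_ERR_PAGE_FAULT) {
                /* touch the memory so it is paged in, then either retry
                 * the HW copy or simply fall back to a SW copy */
                memcpy(dst, src, len);
        }
}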
>>>>>
>>>>>> If the former is the scope, then we need to define standard enum
>>>>>> values for the errors, right? I.e. uint32_t *status needs to change to
>>>>>> enum rte_dma_error or so.
>>>>>>
>>>>> Sure. Perhaps an error/status structure is an option, where we
>>>>> explicitly call out error info from status info.
>>>>
>>>> Agree. Better to have a structure with fields like:
>>>>
>>>> 1) enum rte_dma_error_type
>>>> 2) memory to store an informative message on fine aspects of the error,
>>>> like the address that caused the issue etc. (which will be
>>>> driver-specific information).
>>>>
>>> The only issue I have with that is that once we have driver-specific
>>> information it is of little use to the application, since it can't know
>>> anything about it except maybe log it. I'd much rather have a set of error
>>> codes telling the user that "source address is wrong", "dest address is wrong",
>>> and a generic "an address is wrong" in case driver/HW cannot distinguish the
>>> source of the error. Can we see how far we get with just error codes before we
>>> start passing string messages around, with all the memory management
>>> headaches that implies?
>>
>> Works for me. It should be "enum rte_dma_error_type" then, which has standard
>> error types; that is missing in the spec now.
>>
> +1
> .
> 
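To make the agreed direction concrete, a minimal sketch of such an enum,
listing only the error cases named in this discussion (all identifiers are
illustrative, not the final spec):

enum rte_dma_error_type {
        RTE_DMA_ERR_NONE = 0,
        RTE_DMA_ERR_SRC_ADDRESS,        /* source address is wrong */
        RTE_DMA_ERR_DST_ADDRESS,        /* dest address is wrong */
        RTE_DMA_ERR_ADDRESS,            /* an address is wrong, HW cannot say which */
        RTE_DMA_ERR_INVALID_LENGTH,     /* e.g. beyond a single HW transaction */
        RTE_DMA_ERR_PAGE_FAULT,         /* unpinned memory under SVA/SVM */
        RTE_DMA_ERR_UNKNOWN,
};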

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-08  3:11                 ` fengchengwen
@ 2021-07-08 18:35                   ` Jerin Jacob
  2021-07-09  9:14                     ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-08 18:35 UTC (permalink / raw)
  To: fengchengwen
  Cc: Bruce Richardson, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Thu, Jul 8, 2021 at 8:41 AM fengchengwen <fengchengwen@huawei.com> wrote:
>

> <snip>
> > Ok, let's try it and see how it goes.
>
> Test results show:
> 1) The Kunpeng platform (ARMv8) benefits very little from the doorbell in flags
> 2) The Xeon E5-2690 v2 (x86) benefits from the separate function
> 3) Both platforms benefit from the doorbell in flags when burst < 5
>
> There is a performance gain only for small bursts (<5). Given the extensive use of bursts
> in DPDK applications, and that users are accustomed to the concept, I do not recommend
> using the 'doorbell' in flags.

There is NO concept change between one option and the other; just the
argument is different.
Also, the _perform() scheme is not used anywhere in DPDK.

Regarding performance, I have added dummy instructions to simulate the real
workload[1]; now burst also has some gain on both x86 and arm64[3].

I have modified your application[2] into a DPDK test application to use
CPU isolation etc.
So this is the gain with the flag scheme, and the code is checked in to GitHub[2].

[1]
static inline void
delay(void)
{
        volatile int k;

        for (k = 0; k < 16; k++) {
        }
}

__rte_noinline
int
hisi_dma_enqueue(struct dmadev *dev, int vchan, void *src, void *dst,
                 int len, const int flags)
{
        delay();

        *ring = 1;

        return 0;
}

__rte_noinline
int
hisi_dma_enqueue_doorbell(struct dmadev *dev, int vchan, void *src,
                          void *dst, int len, const int flags)
{
        delay();

        *ring = 1;

        if (unlikely(flags == 1)) {
                rte_wmb();
                *doorbell = 1;
        }

        return 0;
}


[2]

https://github.com/jerinjacobk/dpdk-dmatest/commit/4fc9bc3029543bbc4caaa5183d98bac93c34f588

Updated results [3]

Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz

echo "dma_perf_autotest" | ./build/app/test/dpdk-test --no-huge -c 0xf00

core=24 Timer running at 2600.00MHz
   test_for_perform_after_multiple_enqueue: burst=1 cycles=46.000000
      test_for_last_enqueue_issue_doorbell: burst=1 cycles=45.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=2 cycles=90.000000
      test_for_last_enqueue_issue_doorbell: burst=2 cycles=89.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=3 cycles=134.000000
      test_for_last_enqueue_issue_doorbell: burst=3 cycles=133.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=4 cycles=177.000000
      test_for_last_enqueue_issue_doorbell: burst=4 cycles=176.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=5 cycles=221.000000
      test_for_last_enqueue_issue_doorbell: burst=5 cycles=221.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=6 cycles=265.000000
      test_for_last_enqueue_issue_doorbell: burst=6 cycles=265.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=7 cycles=333.000000
      test_for_last_enqueue_issue_doorbell: burst=7 cycles=309.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=8 cycles=375.000000
      test_for_last_enqueue_issue_doorbell: burst=8 cycles=373.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=9 cycles=418.000000
      test_for_last_enqueue_issue_doorbell: burst=9 cycles=414.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=10 cycles=462.000000
      test_for_last_enqueue_issue_doorbell: burst=10 cycles=458.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=11 cycles=507.000000
      test_for_last_enqueue_issue_doorbell: burst=11 cycles=501.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=12 cycles=552.000000
      test_for_last_enqueue_issue_doorbell: burst=12 cycles=546.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=13 cycles=593.000000
      test_for_last_enqueue_issue_doorbell: burst=13 cycles=590.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=14 cycles=638.000000
      test_for_last_enqueue_issue_doorbell: burst=14 cycles=634.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=15 cycles=681.000000
      test_for_last_enqueue_issue_doorbell: burst=15 cycles=678.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=16 cycles=725.000000
      test_for_last_enqueue_issue_doorbell: burst=16 cycles=722.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=17 cycles=770.000000
      test_for_last_enqueue_issue_doorbell: burst=17 cycles=767.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=18 cycles=815.000000
      test_for_last_enqueue_issue_doorbell: burst=18 cycles=812.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=19 cycles=857.000000
      test_for_last_enqueue_issue_doorbell: burst=19 cycles=854.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=20 cycles=902.000000
      test_for_last_enqueue_issue_doorbell: burst=20 cycles=899.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=21 cycles=945.000000
      test_for_last_enqueue_issue_doorbell: burst=21 cycles=943.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=22 cycles=990.000000
      test_for_last_enqueue_issue_doorbell: burst=22 cycles=988.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=23 cycles=1033.000000
      test_for_last_enqueue_issue_doorbell: burst=23 cycles=1031.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=24 cycles=1077.000000
      test_for_last_enqueue_issue_doorbell: burst=24 cycles=1075.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=25 cycles=1121.000000
      test_for_last_enqueue_issue_doorbell: burst=25 cycles=1119.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=26 cycles=1166.000000
      test_for_last_enqueue_issue_doorbell: burst=26 cycles=1163.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=27 cycles=1208.000000
      test_for_last_enqueue_issue_doorbell: burst=27 cycles=1208.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=28 cycles=1252.000000
      test_for_last_enqueue_issue_doorbell: burst=28 cycles=1252.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=29 cycles=1295.000000
      test_for_last_enqueue_issue_doorbell: burst=29 cycles=1295.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=30 cycles=1342.000000
      test_for_last_enqueue_issue_doorbell: burst=30 cycles=1340.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=31 cycles=1386.000000
      test_for_last_enqueue_issue_doorbell: burst=31 cycles=1384.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=32 cycles=1429.000000
      test_for_last_enqueue_issue_doorbell: burst=32 cycles=1428.000000
-------------------------------------------------------------------------------



octeontx2:

See https://doc.dpdk.org/guides/prog_guide/profile_app.html section
62.2.3. High-resolution cycle counter


meson --cross-file config/arm/arm64_octeontx2_linux_gcc \
	-Dc_args='-DRTE_ARM_EAL_RDTSC_USE_PMU' build

 echo "dma_perf_autotest" | ./build/app/test/dpdk-test --no-huge -c 0xff0000
RTE>>dma_perf_autotest
lcore=16 Timer running at 2400.00MHz
   test_for_perform_after_multiple_enqueue: burst=1 cycles=105.000000
      test_for_last_enqueue_issue_doorbell: burst=1 cycles=105.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=2 cycles=207.000000
      test_for_last_enqueue_issue_doorbell: burst=2 cycles=207.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=3 cycles=309.000000
      test_for_last_enqueue_issue_doorbell: burst=3 cycles=310.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=4 cycles=411.000000
      test_for_last_enqueue_issue_doorbell: burst=4 cycles=410.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=5 cycles=513.000000
      test_for_last_enqueue_issue_doorbell: burst=5 cycles=512.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=6 cycles=615.000000
      test_for_last_enqueue_issue_doorbell: burst=6 cycles=615.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=7 cycles=717.000000
      test_for_last_enqueue_issue_doorbell: burst=7 cycles=716.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=8 cycles=819.000000
      test_for_last_enqueue_issue_doorbell: burst=8 cycles=818.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=9 cycles=921.000000
      test_for_last_enqueue_issue_doorbell: burst=9 cycles=922.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=10 cycles=1023.000000
      test_for_last_enqueue_issue_doorbell: burst=10 cycles=1022.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=11 cycles=1126.000000
      test_for_last_enqueue_issue_doorbell: burst=11 cycles=1124.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=12 cycles=1227.000000
      test_for_last_enqueue_issue_doorbell: burst=12 cycles=1227.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=13 cycles=1329.000000
      test_for_last_enqueue_issue_doorbell: burst=13 cycles=1328.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=14 cycles=1431.000000
      test_for_last_enqueue_issue_doorbell: burst=14 cycles=1430.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=15 cycles=1534.000000
      test_for_last_enqueue_issue_doorbell: burst=15 cycles=1534.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=16 cycles=1638.000000
      test_for_last_enqueue_issue_doorbell: burst=16 cycles=1640.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=17 cycles=1746.000000
      test_for_last_enqueue_issue_doorbell: burst=17 cycles=1739.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=18 cycles=1847.000000
      test_for_last_enqueue_issue_doorbell: burst=18 cycles=1841.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=19 cycles=1950.000000
      test_for_last_enqueue_issue_doorbell: burst=19 cycles=1944.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=20 cycles=2051.000000
       test_for_last_enqueue_issue_doorbell: burst=20 cycles=2045.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=21 cycles=2154.000000
      test_for_last_enqueue_issue_doorbell: burst=21 cycles=2148.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=22 cycles=2257.000000
      test_for_last_enqueue_issue_doorbell: burst=22 cycles=2249.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=23 cycles=2358.000000
      test_for_last_enqueue_issue_doorbell: burst=23 cycles=2352.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=24 cycles=2459.000000
      test_for_last_enqueue_issue_doorbell: burst=24 cycles=2454.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=25 cycles=2562.000000
      test_for_last_enqueue_issue_doorbell: burst=25 cycles=2555.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=26 cycles=2665.000000
      test_for_last_enqueue_issue_doorbell: burst=26 cycles=2657.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=27 cycles=2766.000000
      test_for_last_enqueue_issue_doorbell: burst=27 cycles=2760.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=28 cycles=2867.000000
      test_for_last_enqueue_issue_doorbell: burst=28 cycles=2861.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=29 cycles=2970.000000
      test_for_last_enqueue_issue_doorbell: burst=29 cycles=2964.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=30 cycles=3073.000000
      test_for_last_enqueue_issue_doorbell: burst=30 cycles=3065.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=31 cycles=3174.000000
      test_for_last_enqueue_issue_doorbell: burst=31 cycles=3168.000000
-------------------------------------------------------------------------------
   test_for_perform_after_multiple_enqueue: burst=32 cycles=3275.000000
      test_for_last_enqueue_issue_doorbell: burst=32 cycles=3269.000000
-------------------------------------------------------------------------------
Test OK
RTE>




> <snip>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-08 18:35                   ` Jerin Jacob
@ 2021-07-09  9:14                     ` Bruce Richardson
  2021-07-11  7:14                       ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-09  9:14 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Fri, Jul 09, 2021 at 12:05:40AM +0530, Jerin Jacob wrote:
> On Thu, Jul 8, 2021 at 8:41 AM fengchengwen <fengchengwen@huawei.com> wrote:
> >
> 
> > <snip>
> > > Ok, let's try it and see how it goes.
> >
> > Test results show:
> > 1) The Kunpeng platform (ARMv8) benefits very little from the doorbell in flags
> > 2) The Xeon E5-2690 v2 (x86) benefits from the separate function
> > 3) Both platforms benefit from the doorbell in flags when burst < 5
> >
> > There is a performance gain only for small bursts (<5). Given the extensive use of bursts
> > in DPDK applications, and that users are accustomed to the concept, I do not recommend
> > using the 'doorbell' in flags.
> 
> There is NO concept change between one option and the other; just the
> argument is different.
> Also, the _perform() scheme is not used anywhere in DPDK.
> 
> Regarding performance, I have added dummy instructions to simulate the real
> workload[1]; now burst also has some gain on both x86 and arm64[3].
> 
> I have modified your application[2] into a DPDK test application to use
> CPU isolation etc.
> So this is the gain with the flag scheme, and the code is checked in to GitHub[2].
> 
<snip>

The benchmark numbers all seem very close between the two schemes. On my
team we pretty much have test ioat & idxd drivers ported internally to the
last dmadev draft library, and have sample apps handling traffic using
those. I'll therefore attempt to get these numbers with real traffic on
real drivers to just double check that it's the same as these
microbenchmarks.

Assuming that perf is the same, how to resolve this? Some thoughts:
* As I understand it, the main objection to the separate doorbell function
  is the use of 8-bytes in fastpath slot. Therefore I will also attempt to
  benchmark having the doorbell function not on the same cacheline and check
  perf impact, if any.
* If we don't have an impact on perf by having the doorbell function inside
  the regular "ops" rather than on the fastpath cacheline, there is no reason
  we can't implement both schemes. The user can then choose themselves
  whether to doorbell using a flag on the last item, or to doorbell explicitly
  using a function call.

Of the two schemes, and assuming they are equal, I do have a preference for
the separate function one, primarily from a code readability point of view.
Other than that, I have no strong opinions.
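
For reference, the two coexisting schemes would look roughly like this from
an application (a minimal sketch: rte_dma_copy()/rte_dma_perform() follow
the names used earlier in this thread, and RTE_DMA_FLAG_DOORBELL is a
hypothetical flag name):

#define RTE_DMA_FLAG_DOORBELL (1u << 0) /* hypothetical flag value */

int rte_dma_copy(int dev_id, int vchan, void *src, void *dst, int len, int flags);
void rte_dma_perform(int dev_id, int vchan);

/* Scheme 1: explicit doorbell via a separate function call */
static void burst_with_perform(int dev_id, void **src, void **dst, int len, int n)
{
        int i;

        for (i = 0; i < n; i++)
                rte_dma_copy(dev_id, 0, src[i], dst[i], len, 0);
        rte_dma_perform(dev_id, 0);     /* one doorbell per burst */
}

/* Scheme 2: doorbell via a flag on the last enqueued job */
static void burst_with_flag(int dev_id, void **src, void **dst, int len, int n)
{
        int i;

        for (i = 0; i < n - 1; i++)
                rte_dma_copy(dev_id, 0, src[i], dst[i], len, 0);
        rte_dma_copy(dev_id, 0, src[n - 1], dst[n - 1], len,
                     RTE_DMA_FLAG_DOORBELL);
}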

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-09  9:14                     ` Bruce Richardson
@ 2021-07-11  7:14                       ` Jerin Jacob
  2021-07-12  7:01                         ` Morten Brørup
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-11  7:14 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Fri, Jul 9, 2021 at 2:44 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Fri, Jul 09, 2021 at 12:05:40AM +0530, Jerin Jacob wrote:
> > On Thu, Jul 8, 2021 at 8:41 AM fengchengwen <fengchengwen@huawei.com> wrote:
> > >
> >
> > > <snip>
> > > > Ok, let's try it and see how it goes.
> > >
> > > Test results show:
> > > 1) The Kunpeng platform (ARMv8) benefits very little from the doorbell in flags
> > > 2) The Xeon E5-2690 v2 (x86) benefits from the separate function
> > > 3) Both platforms benefit from the doorbell in flags when burst < 5
> > >
> > > There is a performance gain only for small bursts (<5). Given the extensive use of bursts
> > > in DPDK applications, and that users are accustomed to the concept, I do not recommend
> > > using the 'doorbell' in flags.
> >
> > There is NO concept change between one option and the other; just the
> > argument is different.
> > Also, the _perform() scheme is not used anywhere in DPDK.
> >
> > Regarding performance, I have added dummy instructions to simulate the real
> > workload[1]; now burst also has some gain on both x86 and arm64[3].
> >
> > I have modified your application[2] into a DPDK test application to use
> > CPU isolation etc.
> > So this is the gain with the flag scheme, and the code is checked in to GitHub[2].
> >
> <snip>
>
> The benchmark numbers all seem very close between the two schemes. On my
> team we pretty much have test ioat & idxd drivers ported internally to the
> last dmadev draft library, and have sample apps handling traffic using
> those. I'll therefore attempt to get these numbers with real traffic on
> real drivers to just double check that it's the same as these
> microbenchmarks.

Thanks.

>
> Assuming that perf is the same, how to resolve this? Some thoughts:
> * As I understand it, the main objection to the separate doorbell function
>   is the use of 8-bytes in fastpath slot. Therefore I will also attempt to
>   benchmark having the doorbell function not on the same cacheline and check
>   perf impact, if any.

Probably we can remove the rte_dmadev_fill_sg() variant and keep sg only for copy,
to save 8B.
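
To illustrate the budget in question: on a 64-bit platform each function
pointer is 8B, so eight fast-path slots fill exactly one 64B cacheline
(a hypothetical layout; the slot names are illustrative, not the spec):

struct dmadev_fastpath_ops {
        void *copy;             /* slot 1 */
        void *copy_sg;          /* slot 2 */
        void *fill;             /* slot 3 */
        void *fill_sg;          /* slot 4: the candidate for removal */
        void *submit;           /* slot 5: the separate doorbell function */
        void *completed;        /* slot 6 */
        void *completed_fails;  /* slot 7 */
        void *reserved;         /* slot 8: 8 slots x 8B = one 64B line */
};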

> * If we don't have a impact to perf by having the doorbell function inside
>   the regular "ops" rather than on fastpath cacheline, there is no reason
>   we can't implement both schemes. The user can then choose themselves
>   whether to doorbell using a flag on last item, or to doorbell explicitly
>   using function call.

Yes, I think we can keep both.

>
> Of the two schemes, and assuming they are equal, I do have a preference for
> the separate function one, primarily from a code readability point of view.
> Other than that, I have no strong opinions.
>
> /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (4 preceding siblings ...)
  2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
@ 2021-07-11  9:25 ` Chengwen Feng
  2021-07-11  9:42   ` fengchengwen
                     ` (6 more replies)
  2021-07-13 12:27 ` [dpdk-dev] [PATCH v3] " Chengwen Feng
                   ` (23 subsequent siblings)
  29 siblings, 7 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-11  9:25 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

This patch introduces 'dmadevice' which is a generic type of DMA
device.

The APIs of dmadev library exposes some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                  |    4 +
 config/rte_config.h          |    3 +
 lib/dmadev/meson.build       |    6 +
 lib/dmadev/rte_dmadev.c      |  560 +++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 1030 ++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  159 +++++++
 lib/dmadev/rte_dmadev_pmd.h  |   72 +++
 lib/dmadev/version.map       |   40 ++
 lib/meson.build              |    1 +
 9 files changed, 1875 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 4347555..0595239 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..c918dae
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h', 'rte_dmadev_pmd.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..8a29abb
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,560 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+RTE_LOG_REGISTER(rte_dmadev_logtype, lib.dmadev, INFO);
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
+			RTE_ASSERT(rte_dmadevices[i].attached == 0);
+			return i;
+		}
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_allocated(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].attached == 1) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else {
+			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
+		}
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_allocated(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	strlcpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	RTE_ASSERT(dev->data->dev_id == i);
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->attached = 1;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->attached == 0)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rte_free(dev->data->dev_private);
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+	}
+
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	dev->attached = 0;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_allocated(name);
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	if (dev_id >= RTE_DMADEV_MAX_DEVS ||
+	    rte_dmadevices[dev_id].attached == 0)
+		return false;
+	return true;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].attached == 1)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info);
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev_info info;
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Failed to get info of device %u\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configured with too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_mt_vchan &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MT_VCHAN)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support MT-safe vchan\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_mt_multi_vchan &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support MT-safe multiple vchans\n",
+			dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_reset(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
+	/* Reset is not dependent on state of the device */
+	return (*dev->dev_ops->dev_reset)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev_info info;
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(conf, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Failed to get info of device %u\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == 0 ||
+	    conf->direction & ~RTE_DMA_TRANSFER_DIR_ALL) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_release, -ENOTSUP);
+	return (*dev->dev_ops->vchan_release)(dev, vchan);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, int vchan, struct rte_dmadev_stats *stats)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(stats, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	if (vchan < -1 || vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %d out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats);
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, int vchan)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	if (vchan < -1 || vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %d out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	struct rte_dmadev_info info;
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(f, -EINVAL);
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Failed to get info of device %u\n", dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  MT-safe-configured: vchans: %u multi-vchans: %u\n",
+		dev->data->dev_conf.enable_mt_vchan,
+		dev->data->dev_conf.enable_mt_multi_vchan);
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..8779512
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1030 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+ * DMA channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel.
+ * E.g. an application could create virtual DMA channel 0 for the mem-to-mem
+ *      transfer scenario and virtual DMA channel 1 for the mem-to-dev
+ *      transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and may
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * We use 'uint16_t dev_id' as the device identifier of a dmadev, and
+ * 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to set up a device designated by
+ * its device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. call
+ * rte_dmadev_configure()), it must call rte_dmadev_stop() first to stop the
+ * device and then do the reconfiguration before calling rte_dmadev_start()
+ * again. The dataplane APIs should not be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ *   a) The first part is the submission of operation requests:
+ *        - rte_dmadev_copy()
+ *        - rte_dmadev_copy_sg() - scatter-gather form of copy
+ *        - rte_dmadev_fill()
+ *        - rte_dmadev_fill_sg() - scatter-gather form of fill
+ *        - rte_dmadev_submit() - issue doorbell to hardware
+ *      These APIs could work with different virtual DMA channels which have
+ *      different contexts.
+ *      The first four APIs are used to submit operation requests to a
+ *      virtual DMA channel; if the submission is successful, a uint16_t
+ *      ring_idx is returned, otherwise a negative number is returned.
+ *   b) The second part is to obtain the result of requests:
+ *        - rte_dmadev_completed()
+ *            - return the number of operation requests completed successfully.
+ *        - rte_dmadev_completed_fails()
+ *            - return the number of operation requests which failed to complete.
+ *
+ * About the ring_idx returned by rte_dmadev_copy/copy_sg/fill/fill_sg(),
+ * the rules are as follows:
+ *   a) The ring_idx for each virtual DMA channel is independent.
+ *   b) For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *      when it reaches UINT16_MAX, it wraps back to zero.
+ *   c) The initial ring_idx of a virtual DMA channel is zero; after the
+ *      device is stopped or reset, the ring_idx is reset to zero.
+ *   Example:
+ *      step-1: start one dmadev
+ *      step-2: enqueue a copy operation, the ring_idx returned is 0
+ *      step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *      ...
+ *      step-101: stop the dmadev
+ *      step-102: start the dmadev
+ *      step-103: enqueue a copy operation, the ring_idx returned is 0
+ *      ...
+ *      step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *      step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *      ...
+ *
+ * By default, all the non-dataplane functions of the dmadev API exported by a
+ * PMD are lock-free functions which are assumed not to be invoked in parallel
+ * on different logical cores to work on the same target object.
+ *
+ * The dataplane functions of the dmadev API exported by a PMD can be MT-safe
+ * only when supported by the driver; generally, the driver reports two
+ * capabilities:
+ *   a) Whether MT-safety is supported for the submit/completion API of the
+ *      same virtual DMA channel.
+ *      E.g. one thread does submit operations while another thread does
+ *           completion operations.
+ *      If the driver supports it, it declares RTE_DMA_DEV_CAPA_MT_VCHAN.
+ *      If the driver doesn't support it, it's up to the application to
+ *      guarantee MT-safety.
+ *   b) Whether MT-safety is supported across different virtual DMA channels.
+ *      E.g. one thread operates on virtual DMA channel 0 while another thread
+ *           operates on virtual DMA channel 1.
+ *      If the driver supports it, it declares RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN.
+ *      If the driver doesn't support it, it's up to the application to
+ *      guarantee MT-safety.
+ *
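+ * A minimal usage sketch (assuming one mem-to-mem vchan; error handling is
+ * omitted, and dev_id/src/dst/len are placeholders):
+ *
+ * @code
+ *    struct rte_dmadev_conf conf = { .max_vchans = 1 };
+ *    struct rte_dmadev_vchan_conf vconf = {
+ *            .direction = RTE_DMA_MEM_TO_MEM,
+ *            .nb_desc = 1024,
+ *    };
+ *
+ *    rte_dmadev_configure(dev_id, &conf);
+ *    int vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
+ *    rte_dmadev_start(dev_id);
+ *
+ *    rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
+ *    rte_dmadev_submit(dev_id, vchan);
+ *    (void)rte_dmadev_completed(dev_id, vchan, 1, NULL, NULL);
+ * @endcode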
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+extern int rte_dmadev_logtype;
+
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid port */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+#define RTE_DMADEV_VALID_DEV_ID_OR_RET(dev_id) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return; \
+	} \
+} while (0)
+
+/**
+ * @internal
+ * Validate if the DMA device index is a valid attached DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_internal
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * rte_dma_sg - holds a scatter DMA operation request entry
+ */
+struct rte_dma_sg {
+	rte_iova_t src;
+	rte_iova_t dst;
+	uint32_t length;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/**
+ * The capabilities of a DMA device
+ */
+#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports mem-to-mem transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports slave mode & mem-to-dev transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports slave mode & dev-to-mem transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports slave mode & dev-to-dev transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_OPS_COPY	(1ull << 4)
+/**< DMA device supports copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_OPS_FILL	(1ull << 5)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_OPS_SG		(1ull << 6)
+/**< DMA device supports scatter-list ops.
+ * If the device supports ops_copy and ops_sg, it supports copy_sg ops.
+ * If the device supports ops_fill and ops_sg, it supports fill_sg ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_FENCE		(1ull << 7)
+/**< DMA device supports fence.
+ * If the device supports fence, the application can set a fence flag when
+ * enqueueing an operation via rte_dmadev_copy/copy_sg/fill/fill_sg.
+ * If an operation has the fence flag set, it must be processed only after
+ * all previous operations are completed.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_SVA		(1ull << 8)
+/**< DMA device supports SVA, which allows using a VA as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc(), or the stack.
+ * If the device doesn't support SVA, the application must pass an IOVA
+ * address, e.g. obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_MT_VCHAN	(1ull << 9)
+/**< DMA device supports MT-safe access to a virtual DMA channel.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN	(1ull << 10)
+/**< DMA device supports MT-safe access to different virtual DMA channels.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
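+/* A short sketch of a capability check before choosing the DMA address type
+ * (a hedged example; 'buf' and 'dev_id' are assumed to exist, and
+ * rte_malloc_virt2iova() is the usual VA-to-IOVA helper for rte_malloc memory):
+ *
+ *    struct rte_dmadev_info info;
+ *    rte_dmadev_info_get(dev_id, &info);
+ *    rte_iova_t addr = (info.dev_capa & RTE_DMA_DEV_CAPA_SVA) ?
+ *                      (rte_iova_t)(uintptr_t)buf :
+ *                      rte_malloc_virt2iova(buf);
+ */
+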
+/**
+ * A structure used to retrieve the contextual information of
+ * a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
+	/** Maximum number of virtual DMA channels supported */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors */
+	uint16_t min_desc;
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve the contextual information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   contextual information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the contextual information of the DMA device
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	/** Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	uint16_t max_vchans;
+	/** Enable bit for MT-safe of a virtual DMA channel.
+	 * This bit can be enabled only when the device supports
+	 * RTE_DMA_DEV_CAPA_MT_VCHAN.
+	 * @see RTE_DMA_DEV_CAPA_MT_VCHAN
+	 */
+	uint8_t enable_mt_vchan : 1;
+	/** Enable bit for MT-safe of different virtual DMA channels.
+	 * This bit can be enabled only when the device supports
+	 * RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN.
+	 * @see RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN
+	 */
+	uint8_t enable_mt_multi_vchan : 1;
+	uint64_t reserved[2]; /**< Reserved for future fields */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start()
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset a DMA device.
+ *
+ * This is different from the rte_dmadev_start->rte_dmadev_stop cycle; it is
+ * akin to a hard or soft reset.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Successfully reset device.
+ *   - <0: Failure to reset device.
+ *   - (-ENOTSUP): If the device doesn't support this function.
+ */
+__rte_experimental
+int
+rte_dmadev_reset(uint16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ */
+#define RTE_DMA_MEM_TO_MEM	(1ull << 0)
+/**< DMA transfer direction - from memory to memory.
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+#define RTE_DMA_MEM_TO_DEV	(1ull << 1)
+/**< DMA transfer direction - slave mode & from memory to device.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC.
+ * In this case, the ARM SoC works in slave mode and can initiate a DMA move
+ * request from ARM memory to x86 host memory.
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+#define RTE_DMA_DEV_TO_MEM	(1ull << 2)
+/**< DMA transfer direction - slave mode & from device to memory.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC.
+ * In this case, the ARM SoC works in slave mode and can initiate a DMA move
+ * request from x86 host memory to ARM memory.
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+#define RTE_DMA_DEV_TO_DEV	(1ull << 3)
+/**< DMA transfer direction - slave mode & from device to device.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC.
+ * In this case, the ARM SoC works in slave mode and can initiate a DMA move
+ * request from one x86 host memory region to another.
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+#define RTE_DMA_TRANSFER_DIR_ALL	(RTE_DMA_MEM_TO_MEM | \
+					 RTE_DMA_MEM_TO_DEV | \
+					 RTE_DMA_DEV_TO_MEM | \
+					 RTE_DMA_DEV_TO_DEV)
+
+/**
+ * enum rte_dma_slave_port_type - slave mode type defines
+ */
+enum rte_dma_slave_port_type {
+	/** The slave port is PCIE. */
+	RTE_DMA_SLAVE_PORT_PCIE = 1,
+};
+
+/**
+ * A structure used to describe slave port parameters.
+ */
+struct rte_dma_slave_port_parameters {
+	enum rte_dma_slave_port_type port_type;
+	union {
+		/** For PCIE port */
+		struct {
+			/** The physical function number to use */
+			uint64_t pf_number : 6;
+			/** Virtual function enable bit */
+			uint64_t vf_enable : 1;
+			/** The virtual function number to use */
+			uint64_t vf_number : 8;
+			uint64_t pasid : 20;
+			/** The attributes field in the TLP packet */
+			uint64_t tlp_attr : 3;
+		};
+	};
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	uint8_t direction; /**< Set of supported transfer directions */
+	/** Number of descriptors for the virtual DMA channel */
+	uint16_t nb_desc;
+	/** 1) Used to describe the dev parameter in the mem-to-dev/dev-to-mem
+	 * transfer scenario.
+	 * 2) Used to describe the src dev parameter in the dev-to-dev
+	 * transfer scenario.
+	 */
+	struct rte_dma_slave_port_parameters port;
+	/** Used to describe the dst dev parameters in the dev-to-dev
+	 * transfer scenario.
+	 */
+	struct rte_dma_slave_port_parameters peer_port;
+	uint64_t reserved[2]; /**< Reserved for future fields */
+};
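+
+/* A sketch of a mem-to-dev vchan configuration over a PCIe slave port; all
+ * field values below are hypothetical and depend on the device topology:
+ *
+ *    struct rte_dmadev_vchan_conf vconf = {
+ *            .direction = RTE_DMA_MEM_TO_DEV,
+ *            .nb_desc = 512,
+ *            .port = {
+ *                    .port_type = RTE_DMA_SLAVE_PORT_PCIE,
+ *                    .pf_number = 0,
+ *                    .vf_enable = 1,
+ *                    .vf_number = 2,
+ *            },
+ *    };
+ */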
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation succeeded; the value is the virtual DMA channel id.
+ *          This value must be less than the field 'max_vchans' of struct
+ *          rte_dmadev_conf which was configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Release a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel, as returned by vchan setup.
+ *
+ * @return
+ *   - =0: Successfully release the virtual DMA channel.
+ *   - <0: Error code returned by the driver virtual channel release function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	/** Count of operations which were successfully enqueued */
+	uint64_t enqueued_count;
+	/** Count of operations which were submitted to hardware */
+	uint64_t submitted_count;
+	/** Count of operations which failed to complete */
+	uint64_t completed_fail_count;
+	/** Count of operations which successfully complete */
+	uint64_t completed_count;
+	uint64_t reserved[4]; /**< Reserved for future fields */
+};
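+
+/* A sketch of reading aggregate statistics (vchan == -1 covers all channels;
+ * 'dev_id' is assumed to be a valid device id):
+ *
+ *    struct rte_dmadev_stats st;
+ *    if (rte_dmadev_stats_get(dev_id, -1, &st) == 0)
+ *            printf("completed: %" PRIu64 " failed: %" PRIu64 "\n",
+ *                   st.completed_count, st.completed_fail_count);
+ */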
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel, -1 means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, int vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel, -1 means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, int vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+#include "rte_dmadev_core.h"
+
+/**
+ *  DMA flags to augment operation preparation.
+ *  Used as the 'flags' parameter of rte_dmadev_copy/copy_sg/fill/fill_sg.
+ */
+#define RTE_DMA_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ * @see rte_dmadev_fill_sg()
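+ *
+ * A usage sketch (src/dst/len are placeholders): enqueue two copies where
+ * the second must start only after the first has completed:
+ *
+ * @code
+ *    rte_dmadev_copy(dev_id, vchan, src1, dst1, len1, 0);
+ *    rte_dmadev_copy(dev_id, vchan, src2, dst2, len2, RTE_DMA_FLAG_FENCE);
+ *    rte_dmadev_submit(dev_id, vchan);
+ * @endcode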
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware, but does not
+ * trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
+		return -EINVAL;
+	}
+#endif
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware,
+ * but does not trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param sg
+ *   The pointer of scatterlist.
+ * @param sg_len
+ *   The number of scatterlist elements.
+ * @param flags
+ *   Flags for this operation.
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
+		   uint32_t sg_len, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(sg, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
+		return -EINVAL;
+	}
+#endif
+	return (*dev->copy_sg)(dev, vchan, sg, sg_len, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware, but does not
+ * trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
+		return -EINVAL;
+	}
+#endif
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list fill operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list fill operation to be performed by hardware,
+ * but does not trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param sg
+ *   The pointer of scatterlist.
+ * @param sg_len
+ *   The number of scatterlist elements.
+ * @param flags
+ *   Flags for this operation.
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		   const struct rte_dma_sg *sg, uint32_t sg_len,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(sg, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill_sg, -ENOTSUP);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
+		return -EINVAL;
+	}
+#endif
+	return (*dev->fill_sg)(dev, vchan, pattern, sg, sg_len, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill()
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
+		return -EINVAL;
+	}
+#endif
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, -ENOTSUP);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
+		return -EINVAL;
+	}
+	if (nb_cpls == 0) {
+		RTE_DMADEV_LOG(ERR, "Invalid nb_cpls\n");
+		return -EINVAL;
+	}
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * DMA transfer status code defines
+ */
+enum rte_dma_status_code {
+	/** The operation completed successfully */
+	RTE_DMA_STATUS_SUCCESSFUL = 0,
+	/** The operation failed to complete due to an active drop.
+	 * This is mainly used when processing dev_stop, allowing outstanding
+	 * requests to be completed as much as possible.
+	 */
+	RTE_DMA_STATUS_ACTIVE_DROP,
+	/** The operation failed to complete due to an invalid source address */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/** The operation failed to complete due to an invalid destination
+	 * address
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/** The operation failed to complete due to an invalid length */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/** The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/** The operation failed to complete due to a bus error */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/** The operation failed to complete due to data poisoning */
+	RTE_DMA_STATUS_DATA_POISION,
+	/** The operation failed to complete due to a descriptor read error */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/** The operation failed to complete due to a device link error.
+	 * Indicates a link error in the mem-to-dev/dev-to-mem/dev-to-dev
+	 * transfer scenario.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/** Driver-specific status code offset.
+	 * Starting status code from which drivers can define their own
+	 * error codes.
+	 */
+	RTE_DMA_STATUS_DRV_SPECIFIC_OFFSET = 0x10000,
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that failed to complete.
+ * NOTE: This API should be used after rte_dmadev_completed() reports has_error.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_status
+ *   Indicates the size of the status array.
+ * @param[out] status
+ *   The error code of operations that failed to complete.
+ *   Some standard error code are described in 'enum rte_dma_status_code'
+ *   @see rte_dma_status_code
+ * @param[out] last_idx
+ *   The index of the last failed completed operation.
+ *
+ * @return
+ *   The number of operations that failed to complete.
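+ *
+ * A sketch of the intended calling pattern (the array size is hypothetical):
+ *
+ * @code
+ *    bool has_error = false;
+ *    rte_dmadev_completed(dev_id, vchan, 32, NULL, &has_error);
+ *    if (has_error) {
+ *            uint32_t status[8];
+ *            uint16_t last_idx;
+ *            uint16_t nb_fail = rte_dmadev_completed_fails(dev_id, vchan,
+ *                                                          8, status,
+ *                                                          &last_idx);
+ *            // inspect status[0..nb_fail-1] against rte_dma_status_code
+ *    }
+ * @endcode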
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
+			   const uint16_t nb_status, uint32_t *status,
+			   uint16_t *last_idx)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(status, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(last_idx, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_fails, -ENOTSUP);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
+		return -EINVAL;
+	}
+	if (nb_status == 0) {
+		RTE_DMADEV_LOG(ERR, "Invalid nb_status\n");
+		return -EINVAL;
+	}
+#endif
+	return (*dev->completed_fails)(dev, vchan, nb_status, status, last_idx);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..410faf0
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
+				 struct rte_dmadev_info *dev_info);
+
+/** @internal Used to configure a device. */
+typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
+				  const struct rte_dmadev_conf *dev_conf);
+
+/** @internal Used to start a configured device. */
+typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
+
+/** @internal Used to reset a configured device. */
+typedef int (*dmadev_reset_t)(struct rte_dmadev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				    const struct rte_dmadev_vchan_conf *conf);
+
+/** @internal Used to release a virtual DMA channel. */
+typedef int (*dmadev_vchan_release_t)(struct rte_dmadev *dev, uint16_t vchan);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*dmadev_stats_get_t)(struct rte_dmadev *dev, int vchan,
+				  struct rte_dmadev_stats *stats);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, int vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*dmadev_dump_t)(struct rte_dmadev *dev, FILE *f);
+
+/** @internal Used to start dmadev selftest. */
+typedef int (*dmadev_selftest_t)(uint16_t dev_id);
+
+/** @internal Used to enqueue a copy operation. */
+typedef int (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+			     rte_iova_t src, rte_iova_t dst,
+			     uint32_t length, uint64_t flags);
+
+/** @internal Used to enqueue a scatter list copy operation. */
+typedef int (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dma_sg *sg,
+				uint32_t sg_len, uint64_t flags);
+
+/** @internal Used to enqueue a fill operation. */
+typedef int (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+			     uint64_t pattern, rte_iova_t dst,
+			     uint32_t length, uint64_t flags);
+
+/** @internal Used to enqueue a scatter list fill operation. */
+typedef int (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+			uint64_t pattern, const struct rte_dma_sg *sg,
+			uint32_t sg_len, uint64_t flags);
+
+/** @internal Used to trigger hardware to begin working. */
+typedef int (*dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+
+/** @internal Used to return the number of successfully completed operations. */
+typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vchan,
+				       const uint16_t nb_cpls,
+				       uint16_t *last_idx, bool *has_error);
+
+/** @internal Used to return the number of operations that failed to complete. */
+typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_status,
+			uint32_t *status, uint16_t *last_idx);
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	dmadev_info_get_t dev_info_get;
+	dmadev_configure_t dev_configure;
+	dmadev_start_t dev_start;
+	dmadev_stop_t dev_stop;
+	dmadev_close_t dev_close;
+	dmadev_reset_t dev_reset;
+	dmadev_vchan_setup_t vchan_setup;
+	dmadev_vchan_release_t vchan_release;
+	dmadev_stats_get_t stats_get;
+	dmadev_stats_reset_t stats_reset;
+	dmadev_dump_t dev_dump;
+	dmadev_selftest_t dev_selftest;
+};
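+
+/* A skeleton PMD might populate the ops table as below (a sketch; the
+ * skeleton_* callbacks are hypothetical and only a subset is shown):
+ *
+ *    static const struct rte_dmadev_ops skeleton_ops = {
+ *            .dev_info_get  = skeleton_info_get,
+ *            .dev_configure = skeleton_configure,
+ *            .dev_start     = skeleton_start,
+ *            .dev_stop      = skeleton_stop,
+ *            .dev_close     = skeleton_close,
+ *            .vchan_setup   = skeleton_vchan_setup,
+ *            .stats_get     = skeleton_stats_get,
+ *    };
+ */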
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	void *dev_private; /**< PMD-specific private data. */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[4]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ */
+struct rte_dmadev {
+	dmadev_copy_t copy;
+	dmadev_copy_sg_t copy_sg;
+	dmadev_fill_t fill;
+	dmadev_fill_sg_t fill_sg;
+	dmadev_submit_t submit;
+	dmadev_completed_t completed;
+	dmadev_completed_fails_t completed_fails;
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	/** Flag indicating the device is attached: ATTACHED(1)/DETACHED(0). */
+	uint8_t attached : 1;
+	/** Device info supplied during device initialization. */
+	struct rte_device *device;
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	uint64_t reserved[4]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
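+
+/* A probe-time sketch for a hypothetical PMD 'foo'; in the primary process
+ * one might do the following (rte_dmadev_pmd_release() frees dev_private
+ * with rte_free(), so it should be allocated with rte_zmalloc()):
+ *
+ *    struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ *    if (dev == NULL)
+ *            return -ENOMEM;
+ *    dev->dev_ops = &foo_ops;
+ *    dev->data->dev_private = rte_zmalloc(NULL, sizeof(struct foo_priv), 0);
+ */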
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..0f099e7
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,40 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_count;
+	rte_dmadev_info_get;
+	rte_dmadev_configure;
+	rte_dmadev_start;
+	rte_dmadev_stop;
+	rte_dmadev_close;
+	rte_dmadev_reset;
+	rte_dmadev_vchan_setup;
+	rte_dmadev_vchan_release;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_dump;
+	rte_dmadev_selftest;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_fill;
+	rte_dmadev_fill_sg;
+	rte_dmadev_submit;
+	rte_dmadev_completed;
+	rte_dmadev_completed_fails;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dmadevices;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+	rte_dmadev_get_device_by_name;
+
+	local:
+
+	rte_dmadev_is_valid_dev;
+};
+
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
@ 2021-07-11  9:42   ` fengchengwen
  2021-07-11 13:34     ` Jerin Jacob
  2021-07-11 14:25   ` Jerin Jacob
                     ` (5 subsequent siblings)
  6 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-11  9:42 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

Note:
1) This patch keeps the dmadev <> vchan layering; I think the vchan can
   be cleanly separated, conceptually, from the hw-channel.
2) I could not understand struct dpi_dma_queue_ctx_s, so in this patch I
   defined rte_dma_slave_port_parameters with reference to the Kunpeng
   DMA implementation.
3) This patch doesn't include the doxygen-related files, because doc
   generation failed in my environment. Could these go upstream as a new
   patch, or must this be solved first?

Feedback welcome, thanks

On 2021/7/11 17:25, Chengwen Feng wrote:
> This patch introduces 'dmadevice', which is a generic type of DMA
> device.
>
> The dmadev library exposes generic APIs for configuring and
> performing I/O with DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11  9:42   ` fengchengwen
@ 2021-07-11 13:34     ` Jerin Jacob
  2021-07-12  7:40       ` Morten Brørup
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-11 13:34 UTC (permalink / raw)
  To: fengchengwen
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma

On Sun, Jul 11, 2021 at 3:12 PM fengchengwen <fengchengwen@huawei.com> wrote:
>
> Note:
> 1) This patch keeps the dmadev <> vchan layering; I think the vchan can
>    be cleanly separated, conceptually, from the hw-channel.

I would like to keep it as "channel" instead of "virtual channel", as the
latter is implementation-specific.
No strong opinion on this, though. @Richardson, Bruce, @Morten Brørup:
thoughts?

> 2) I could not understand struct dpi_dma_queue_ctx_s, so in this patch I
>    defined rte_dma_slave_port_parameters with reference to the Kunpeng DMA
>    implementation.
> 3) This patch does not include the doxygen-related files, because doc
>    generation fails in my environment. Could these go upstream as a
>    separate patch, or must they be included here?

No, IMO. The final version to be merged should be split into patches like:

1) Header file with doxygen comments
2) Multiple patches for the implementation, as needed
3) Programmer guide doc

Other items we typically have for each new device class:

1) Skeleton driver (can use memcpy in this case; see the sketch below)
2) An app/test-dmadev kind of application (can be used to measure
   performance and functionality)
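
A rough idea of how small such a skeleton's copy op could be (a sketch only;
the ring-index bookkeeping is simplified to a single counter, and it assumes
IOVA addresses are directly usable as VAs, e.g. --iova-mode=va):

    #include <string.h>
    #include <rte_common.h>
    #include <rte_dmadev_pmd.h>

    static uint16_t next_idx; /* ring index, wraps naturally at UINT16_MAX */

    static int
    skeleton_copy(struct rte_dmadev *dev, uint16_t vchan, rte_iova_t src,
                  rte_iova_t dst, uint32_t length, uint64_t flags)
    {
        RTE_SET_USED(dev);
        RTE_SET_USED(vchan);
        RTE_SET_USED(flags);
        memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, length);
        return next_idx++;
    }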

>
> Feedback welcome, thanks
>
> On 2021/7/11 17:25, Chengwen Feng wrote:
> > This patch introduces 'dmadevice' which is a generic type of DMA
> > device.
> >
> > The APIs of the dmadev library expose some generic operations which can
> > enable configuration and I/O with the DMA devices.
> >
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > ---
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
  2021-07-11  9:42   ` fengchengwen
@ 2021-07-11 14:25   ` Jerin Jacob
  2021-07-12  7:15   ` Morten Brørup
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-11 14:25 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma

On Sun, Jul 11, 2021 at 2:59 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch introduces 'dmadevice' which is a generic type of DMA
> device.
>
> The APIs of the dmadev library expose some generic operations which can
> enable configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..8779512
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,1030 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + * Copyright(c) 2021 Marvell International Ltd.
> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The DMA framework is built on the following model:
> + *
> + *     ---------------   ---------------       ---------------
> + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> + *     | channel     |   | channel     |       | channel     |
> + *     ---------------   ---------------       ---------------
> + *            |                |                      |
> + *            ------------------                      |
> + *                     |                              |
> + *               ------------                    ------------
> + *               |  dmadev  |                    |  dmadev  |
> + *               ------------                    ------------
> + *                     |                              |
> + *            ------------------               ------------------
> + *            | HW-DMA-channel |               | HW-DMA-channel |
> + *            ------------------               ------------------
> + *                     |                              |
> + *                     --------------------------------
> + *                                     |
> + *                           ---------------------
> + *                           | HW-DMA-Controller |
> + *                           ---------------------
> + *
> + * The DMA controller could have multilpe HW-DMA-channels (aka. HW-DMA-queues),

Typo - multiple

> + * each HW-DMA-channel should be represented by a dmadev.
> + *
> + * The dmadev could create multiple virtual DMA channel, each virtual DMA
> + * channel represents a different transfer context. The DMA operation request
> + * must be submitted to the virtual DMA channel.
> + * E.G. Application could create virtual DMA channel 0 for mem-to-mem transfer
> + *      scenario, and create virtual DMA channel 1 for mem-to-dev transfer
> + *      scenario.
> + *
> + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
> + * PCI/SoC device probing phase performed at EAL initialization time. And could
> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> + * phase.
> + *
> + * We use 'uint16_t dev_id' as the device identifier of a dmadev, and

Please remove "We use" and reword accordingly.

> + * 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> + *
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_vchan_setup()
> + *     - rte_dmadev_start()
> + *
> + * Then, the application can invoke dataplane APIs to process jobs.
> + *
> + * If the application wants to change the configuration (i.e. call
> + * rte_dmadev_configure()), it must call rte_dmadev_stop() first to stop the
> + * device and then do the reconfiguration before calling rte_dmadev_start()
> + * again. The dataplane APIs should not be invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the
> + * rte_dmadev_close() function.
> + *
> + * The dataplane APIs include two parts:
> + *   a) The first part is the submission of operation requests:
> + *        - rte_dmadev_copy()
> + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> + *        - rte_dmadev_fill()
> + *        - rte_dmadev_fill_sg() - scatter-gather form of fill
> + *        - rte_dmadev_perform() - issue doorbell to hardware
> + *      These APIs could work with different virtual DMA channels which have
> + *      different contexts.
> + *      The first four APIs are used to submit the operation request to the
> + *      virtual DMA channel, if the submission is successful, a uint16_t
> + *      ring_idx is returned, otherwise a negative number is returned.
> + *   b) The second part is to obtain the result of requests:
> + *        - rte_dmadev_completed()
> + *            - return the number of operation requests completed successfully.
> + *        - rte_dmadev_completed_fails()
> + *            - return the number of operation requests failed to complete.
> + *
> + * About the ring_idx which rte_dmadev_copy/copy_sg/fill/fill_sg() returned,
> + * the rules are as follows:
> + *   a) ring_idx for each virtual DMA channel are independent.
> + *   b) For a virtual DMA channel, the ring_idx is monotonically incremented,
> + *      when it reach UINT16_MAX, it wraps back to zero.
> + *   c) The initial ring_idx of a virtual DMA channel is zero, after the device
> + *      is stopped or reset, the ring_idx needs to be reset to zero.
> + *   Example:
> + *      step-1: start one dmadev
> + *      step-2: enqueue a copy operation, the ring_idx return is 0
> + *      step-3: enqueue a copy operation again, the ring_idx return is 1
> + *      ...
> + *      step-101: stop the dmadev
> + *      step-102: start the dmadev
> + *      step-103: enqueue a copy operation, the cookie return is 0
> + *      ...
> + *      step-x+0: enqueue a fill operation, the ring_idx return is 65535
> + *      step-x+1: enqueue a copy operation, the ring_idx return is 0
> + *      ...
> + *
> + * By default, all the non-dataplane functions of the dmadev API exported by a
> + * PMD are lock-free functions which assume to not be invoked in parallel on
> + * different logical cores to work on the same target object.
> + *
> + * The dataplane functions of the dmadev API exported by a PMD can be MT-safe
> + * only when supported by the driver, generally, the driver will reports two
> + * capabilities:
> + *   a) Whether to support MT-safe for the submit/completion API of the same
> + *      virtual DMA channel.
> + *      E.G. one thread do submit operation, another thread do completion
> + *           operation.
> + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_VCHAN.
> + *      If driver don't support it, it's up to the application to guarantee
> + *      MT-safe.
> + *   b) Whether to support MT-safe for different virtual DMA channels.
> + *      E.G. one thread do operation on virtual DMA channel 0, another thread
> + *           do operation on virtual DMA channel 1.
> + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN.
> + *      If driver don't support it, it's up to the application to guarantee
> + *      MT-safe.

The above cases make it difficult to write applications. Also, application
locking using spinlocks etc. may not be the best optimization (a driver may
be able to do it in a better way). IMO, as discussed with @Morten Brørup, it
is better if, while configuring the channel, the application can specify
whether it "needs" MT safety, so as to have a portable application. Unlike a
network or crypto device, since we are creating virtual channels, such a
scheme is useful.
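
As a sketch of that idea (the mt_safe field below is hypothetical, not part
of this patch):

    struct rte_dmadev_vchan_conf conf = {
        .direction = RTE_DMA_MEM_TO_MEM,
        .nb_desc = 1024,
        .mt_safe = 1, /* application states its requirement up front */
    };
    /* The driver then either provides MT-safety natively or transparently
     * wraps this vchan's enqueue/completion ops in a lock, so the
     * application code stays portable across PMDs. */
    int vchan = rte_dmadev_vchan_setup(dev_id, &conf);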


> + *
> + */
> +
> +#include <rte_common.h>
> +#include <rte_compat.h>
> +#include <rte_errno.h>
> +#include <rte_memory.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#define RTE_DMADEV_NAME_MAX_LEN        RTE_DEV_NAME_MAX_LEN
> +
> +extern int rte_dmadev_logtype;
> +

Missing Doxygen comment

> +#define RTE_DMADEV_LOG(level, ...) \
> +       rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
> +

Make it internal.

> +/* Macros to check for valid port */
> +#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
> +       if (!rte_dmadev_is_valid_dev(dev_id)) { \
> +               RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> +               return retval; \
> +       } \
> +} while (0)
> +

Make it internal.

> +#define RTE_DMADEV_VALID_DEV_ID_OR_RET(dev_id) do { \
> +       if (!rte_dmadev_is_valid_dev(dev_id)) { \
> +               RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> +               return; \
> +       } \
> +} while (0)
> +
> +/**
> + * @internal

We can make this a public API.

> + * Validate if the DMA device index is a valid attached DMA device.
> + *
> + * @param dev_id
> + *   DMA device index.
> + *
> + * @return
> + *   - If the device index is valid (true) or not (false).
> + */
> +__rte_internal
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> +
> +/**
> + * rte_dma_sg - can hold scatter DMA operation request
> + */
> +struct rte_dma_sg {
> +       rte_iova_t src;
> +       rte_iova_t dst;

IMO, it should hold only an rte_iova_t addr. See the comment at the _sg API.

> +       uint32_t length;
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the total number of DMA devices that have been successfully
> + * initialised.
> + *
> + * @return
> + *   The total number of usable DMA devices.
> + */
> +__rte_experimental
> +uint16_t
> +rte_dmadev_count(void);
> +
> +/**
> + * The capabilities of a DMA device
> + */
> +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM    (1ull << 0)
> +/**< DMA device support mem-to-mem transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_MEM_TO_DEV    (1ull << 1)
> +/**< DMA device support slave mode & mem-to-dev transfer.

Do we need to say "slave mode"? Just mem-to-dev is fine, right?

> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_DEV_TO_MEM    (1ull << 2)
> +/**< DMA device support slave mode & dev-to-mem transfer.

See above.

> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_DEV_TO_DEV    (1ull << 3)
> +/**< DMA device support slave mode & dev-to-dev transfer.

See above.

> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_COPY      (1ull << 4)
> +/**< DMA device support copy ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_FILL      (1ull << 5)
> +/**< DMA device support fill ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_SG                (1ull << 6)
> +/**< DMA device support scatter-list ops.
> + * If device support ops_copy and ops_sg, it means supporting copy_sg ops.
> + * If device support ops_fill and ops_sg, it means supporting fill_sg ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_FENCE         (1ull << 7)
> +/**< DMA device support fence.
> + * If device support fence, then application could set a fence flags when
> + * enqueue operation by rte_dma_copy/copy_sg/fill/fill_sg.
> + * If a operation has a fence flags, it means the operation must be processed
> + * only after all previous operations are completed.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_SVA           (1ull << 8)
> +/**< DMA device support SVA which could use VA as DMA address.
> + * If device support SVA then application could pass any VA address like memory
> + * from rte_malloc(), rte_memzone(), malloc, stack memory.
> + * If device don't support SVA, then application should pass IOVA address which
> + * from rte_malloc(), rte_memzone().
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_MT_VCHAN      (1ull << 9)
> +/**< DMA device support MT-safe of a virtual DMA channel.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN        (1ull << 10)
> +/**< DMA device support MT-safe of different virtual DMA channels.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +/**
> + * A structure used to retrieve the contextual information of
> + * an DMA device
> + */
> +struct rte_dmadev_info {
> +       struct rte_device *device; /**< Generic Device information */
> +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> +       /** Maximum number of virtual DMA channels supported */
> +       uint16_t max_vchans;

The Doxygen comment should come after the symbol, not above it.

> +       /** Maximum allowed number of virtual DMA channel descriptors */
> +       uint16_t max_desc;
> +       /** Minimum allowed number of virtual DMA channel descriptors */
> +       uint16_t min_desc;
> +       uint16_t nb_vchans; /**< Number of virtual DMA channel configured */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve the contextual information of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param[out] dev_info
> + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> + *   contextual information of the device.
> + *
> + * @return
> + *   - =0: Success, driver updates the contextual information of the DMA device
> + *   - <0: Error code returned by the driver info get function.
> + *
> + */
> +__rte_experimental
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> +
> +/**
> + * A structure used to configure a DMA device.
> + */
> +struct rte_dmadev_conf {
> +       /** Maximum number of virtual DMA channel to use.
> +        * This value cannot be greater than the field 'max_vchans' of struct
> +        * rte_dmadev_info which get from rte_dmadev_info_get().
> +        */
> +       uint16_t max_vchans;
> +       /** Enable bit for MT-safe of a virtual DMA channel.
> +        * This bit can be enabled only when the device supports
> +        * RTE_DMA_DEV_CAPA_MT_VCHAN.
> +        * @see RTE_DMA_DEV_CAPA_MT_VCHAN
> +        */
> +       uint8_t enable_mt_vchan : 1;
> +       /** Enable bit for MT-safe of different virtual DMA channels.
> +        * This bit can be enabled only when the device supports
> +        * RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN.
> +        * @see RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN

I think we can support this even if the capability flag is not set, so as
not to limit the application's fastpath options.

> +        */
> +       uint8_t enable_mt_multi_vchan : 1;
> +       uint64_t reserved[2]; /**< Reserved for future fields */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Configure a DMA device.
> + *
> + * This function must be invoked first before any other function in the
> + * API. This function can also be re-invoked when a device is in the
> + * stopped state.
> + *
> + * @param dev_id
> + *   The identifier of the device to configure.
> + * @param dev_conf
> + *   The DMA device configuration structure encapsulated into rte_dmadev_conf
> + *   object.
> + *
> + * @return
> + *   - =0: Success, device configured.
> + *   - <0: Error code returned by the driver configuration function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Start a DMA device.
> + *
> + * The device start step is the last one and consists of setting the DMA
> + * to start accepting jobs.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device started.
> + *   - <0: Error code returned by the driver start function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_start(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Stop a DMA device.
> + *
> + * The device can be restarted with a call to rte_dmadev_start()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device stopped.
> + *   - <0: Error code returned by the driver stop function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stop(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Close a DMA device.
> + *
> + * The device cannot be restarted after this call.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *  - =0: Successfully close device
> + *  - <0: Failure to close device
> + */
> +__rte_experimental
> +int
> +rte_dmadev_close(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset a DMA device.
> + *
> + * This is different from cycle of rte_dmadev_start->rte_dmadev_stop in the
> + * sense similar to hard or soft reset.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Successfully reset device.
> + *   - <0: Failure to reset device.
> + *   - (-ENOTSUP): If the device doesn't support this function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_reset(uint16_t dev_id);

Is this required now?

> +
> +/**
> + * DMA transfer direction defines.
> + */
> +#define RTE_DMA_MEM_TO_MEM     (1ull << 0)

RTE_DMA_DIRECTION_...

> +/**< DMA transfer direction - from memory to memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_MEM_TO_DEV     (1ull << 1)
> +/**< DMA transfer direction - slave mode & from memory to device.
> + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> + * request from ARM memory to x86 host memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_DEV_TO_MEM     (1ull << 2)
> +/**< DMA transfer direction - slave mode & from device to memory.
> + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> + * request from x86 host memory to ARM memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_DEV_TO_DEV     (1ull << 3)
> +/**< DMA transfer direction - slave mode & from device to device.
> + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> + * request from x86 host memory to another x86 host memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_TRANSFER_DIR_ALL       (RTE_DMA_MEM_TO_MEM | \
> +                                        RTE_DMA_MEM_TO_DEV | \
> +                                        RTE_DMA_DEV_TO_MEM | \
> +                                        RTE_DMA_DEV_TO_DEV)
> +
> +/**
> + * enum rte_dma_slave_port_type - slave mode type defines
> + */
> +enum rte_dma_slave_port_type {

I think rte_dmadev_dev_type is better.

> +       /** The slave port is PCIE. */
> +       RTE_DMA_SLAVE_PORT_PCIE = 1,
> +};
> +
> +/**
> + * A structure used to descript slave port parameters.*
> + */
> +struct rte_dma_slave_port_parameters {

Maybe rte_dmadev_dev_conf?

> +       enum rte_dma_slave_port_type port_type;
> +       union {
> +               /** For PCIE port */
> +               struct {
> +                       /** The physical function number which to use */
> +                       uint64_t pf_number : 6;
> +                       /** Virtual function enable bit */
> +                       uint64_t vf_enable : 1;
> +                       /** The virtual function number which to use */
> +                       uint64_t vf_number : 8;
> +                       uint64_t pasid : 20;
> +                       /** The attributes filed in TLP packet */
> +                       uint64_t tlp_attr : 3;
> +               };
> +       };
> +};
> +
> +/**
> + * A structure used to configure a virtual DMA channel.
> + */
> +struct rte_dmadev_vchan_conf {
> +       uint8_t direction; /**< Set of supported transfer directions */
We could add @see RTE_DMA_DIRECTION_*.
Also, say how the application can know the valid flags, i.e. point to the
info structure.


> +       /** Number of descriptor for the virtual DMA channel */
> +       uint16_t nb_desc;
> +       /** 1) Used to describes the dev parameter in the mem-to-dev/dev-to-mem
> +        * transfer scenario.
> +        * 2) Used to describes the src dev parameter in the dev-to-dev
> +        * transfer scenario.
> +        */
> +       struct rte_dma_slave_port_parameters port;
> +       /** Used to describes the dst dev parameters in the dev-to-dev
> +        * transfer scenario.
> +        */
> +       struct rte_dma_slave_port_parameters peer_port;
> +       uint64_t reserved[2]; /**< Reserved for future fields */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate and set up a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param conf
> + *   The virtual DMA channel configuration structure encapsulated into
> + *   rte_dmadev_vchan_conf object.
> + *
> + * @return
> + *   - >=0: Allocate success, it is the virtual DMA channel id. This value must
> + *          be less than the field 'max_vchans' of struct rte_dmadev_conf
> +           which configured by rte_dmadev_configure().
> + *   - <0: Error code returned by the driver virtual channel setup function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +                      const struct rte_dmadev_vchan_conf *conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Release a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel which return by vchan setup.
> + *
> + * @return
> + *   - =0: Successfully release the virtual DMA channel.
> + *   - <0: Error code returned by the driver virtual channel release function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);

We are not making release a public API in other device classes. See the
ethdev spec.


> +
> +/**
> + * rte_dmadev_stats - running statistics.
> + */
> +struct rte_dmadev_stats {
> +       /** Count of operations which were successfully enqueued */
> +       uint64_t enqueued_count;
> +       /** Count of operations which were submitted to hardware */
> +       uint64_t submitted_count;
> +       /** Count of operations which failed to complete */
> +       uint64_t completed_fail_count;
> +       /** Count of operations which successfully complete */
> +       uint64_t completed_count;
> +       uint64_t reserved[4]; /**< Reserved for future fields */
> +};

Please add a capability flag for each counter in the info structure, as a
device may not support all of the counters.
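
For example (these capability names are hypothetical, just to illustrate the
suggestion):

    #define RTE_DMA_DEV_CAPA_STATS_SUBMITTED (1ull << 32)
    #define RTE_DMA_DEV_CAPA_STATS_COMPLETED (1ull << 33)
    /* rte_dmadev_info_get() would then advertise which of the
     * rte_dmadev_stats counters a given device actually maintains. */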

> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve basic statistics of a or all virtual DMA channel(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel, -1 means all channels.
> + * @param[out] stats
> + *   The basic statistics structure encapsulated into rte_dmadev_stats
> + *   object.
> + *
> + * @return
> + *   - =0: Successfully retrieve stats.
> + *   - <0: Failure to retrieve stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, int vchan,
> +                    struct rte_dmadev_stats *stats);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset basic statistics of a or all virtual DMA channel(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel, -1 means all channels.
> + *
> + * @return
> + *   - =0: Successfully reset stats.
> + *   - <0: Failure to reset stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, int vchan);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Dump DMA device info.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param f
> + *   The file to write the output to.
> + *
> + * @return
> + *   0 on success. Non-zero otherwise.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger the dmadev self test.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - 0: Selftest successful.
> + *   - -ENOTSUP if the device doesn't support selftest
> + *   - other values < 0 on failure.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_selftest(uint16_t dev_id);
> +
> +#include "rte_dmadev_core.h"
> +
> +/**
> + *  DMA flags to augment operation preparation.
> + *  Used as the 'flags' parameter of rte_dmadev_copy/copy_sg/fill/fill_sg.
> + */
> +#define RTE_DMA_FLAG_FENCE     (1ull << 0)

RTE_DMA_OP_FLAG_FENCE

Please also add RTE_DMA_OP_FLAG_SUBMIT, as we discussed in another thread.
We can support both submit based on the flag and the _submit() version,
according to application preference.
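
For illustration, the two styles side by side (RTE_DMA_OP_FLAG_SUBMIT is the
proposal here, not part of the posted patch):

    /* style 1: batch the enqueues, then an explicit doorbell */
    rte_dmadev_copy(dev_id, vchan, src0, dst0, len0, 0);
    rte_dmadev_copy(dev_id, vchan, src1, dst1, len1, 0);
    rte_dmadev_submit(dev_id, vchan);

    /* style 2: ring the doorbell from the last enqueue via the flag */
    rte_dmadev_copy(dev_id, vchan, src0, dst0, len0, 0);
    rte_dmadev_copy(dev_id, vchan, src1, dst1, len1,
                    RTE_DMA_OP_FLAG_SUBMIT);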

> +/**< DMA fence flag
> + * It means the operation with this flag must be processed only after all
> + * previous operations are completed.
> + *
> + * @see rte_dmadev_copy()
> + * @see rte_dmadev_copy_sg()
> + * @see rte_dmadev_fill()
> + * @see rte_dmadev_fill_sg()
> + */
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the virtual DMA channel.
> + *
> + * This queues up a copy operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   An flags for this operation.

See RTE_DMA_OP_FLAG_*

> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> +               uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +#endif
> +       return (*dev->copy)(dev, vchan, src, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter list copy operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param sg_len
> + *   The number of scatterlist elements.
> + * @param flags
> + *   An flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
> +                  uint32_t sg_len, uint64_t flags)


As requested earlier, I prefer to have:

rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan,
                   const struct rte_dma_sg *src, uint32_t nb_src,
                   const struct rte_dma_sg *dst, uint32_t nb_dst,
                   uint64_t flags)
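
Usage under that signature would look roughly like this (a sketch; it
assumes rte_dma_sg is reduced to addr/length members as suggested above, and
that the IOVA variables are prepared by the caller):

    struct rte_dma_sg src[2] = {
        { .addr = src_iova0, .length = 64 },
        { .addr = src_iova1, .length = 64 },
    };
    struct rte_dma_sg dst[1] = { { .addr = dst_iova, .length = 128 } };
    int ret;

    /* gather two 64B source segments into one 128B destination */
    ret = rte_dmadev_copy_sg(dev_id, vchan, src, 2, dst, 1, flags);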


> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(sg, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +#endif
> +       return (*dev->copy_sg)(dev, vchan, sg, sg_len, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the virtual DMA channel.
> + *
> + * This queues up a fill operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   An flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +               rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +#endif
> +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);

Instead of every driver setting a NOP function, the common code can set a
NOP function (with a <0 return value) for this when the CAPA flag is not
set.
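
Roughly like this (a sketch of the common-code side; where exactly it hooks
in is an assumption):

    static int
    dmadev_fill_nop(struct rte_dmadev *dev, uint16_t vchan, uint64_t pattern,
                    rte_iova_t dst, uint32_t length, uint64_t flags)
    {
        RTE_SET_USED(dev); RTE_SET_USED(vchan); RTE_SET_USED(pattern);
        RTE_SET_USED(dst); RTE_SET_USED(length); RTE_SET_USED(flags);
        return -ENOTSUP;
    }

    /* at allocation/configure time, in the common code: */
    if (!(info.dev_capa & RTE_DMA_DEV_CAPA_OPS_FILL))
        dev->fill = dmadev_fill_nop;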

> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list fill operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter list fill operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param sg_len
> + *   The number of scatterlist elements.
> + * @param flags
> + *   An flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +                  const struct rte_dma_sg *sg, uint32_t sg_len,
> +                  uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(sg, -ENOTSUP);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +#endif
> +       return (*dev->fill_sg)(dev, vchan, pattern, sg, sg_len, flags);

In order to save 8B in rte_dmadev, this API can be removed, as it looks like
none of the drivers supports fill in SG mode.

> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations.
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *
> + * @return
> + *   - =0: Successfully trigger hardware.
> + *   - <0: Failure to trigger hardware.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +#endif
> +       return (*dev->submit)(dev, vchan);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] has_error
> + *   Indicates if there are transfer error.
> + *   If not required, NULL can be passed in.
> + *
> + * @return
> + *   The number of operations that successfully completed.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> +                    uint16_t *last_idx, bool *has_error)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +       bool err;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +       if (nb_cpls == 0) {
> +               RTE_DMADEV_LOG(ERR, "Invalid nb_cpls\n");
> +               return -EINVAL;
> +       }
> +#endif
> +
> +       /* Ensure the pointer values are non-null to simplify drivers.
> +        * In most cases these should be compile time evaluated, since this is
> +        * an inline function.
> +        * - If NULL is explicitly passed as parameter, then compiler knows the
> +        *   value is NULL
> +        * - If address of local variable is passed as parameter, then compiler
> +        *   can know it's non-NULL.
> +        */
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +       if (has_error == NULL)
> +               has_error = &err;
> +
> +       *has_error = false;
> +       return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> +}
> +
> +/**
> + * DMA transfer status code defines
> + */
> +enum rte_dma_status_code {
> +       /** The operation completed successfully */
> +       RTE_DMA_STATUS_SUCCESSFUL = 0,
> +       /** The operation failed to complete due active drop
> +        * This is mainly used when processing dev_stop, allow outstanding
> +        * requests to be completed as much as possible.
> +        */
> +       RTE_DMA_STATUS_ACTIVE_DROP,
> +       /** The operation failed to complete due invalid source address */
> +       RTE_DMA_STATUS_INVALID_SRC_ADDR,
> +       /** The operation failed to complete due invalid destination address */
> +       RTE_DMA_STATUS_INVALID_DST_ADDR,
> +       /** The operation failed to complete due invalid length */
> +       RTE_DMA_STATUS_INVALID_LENGTH,
> +       /** The operation failed to complete due invalid opcode
> +        * The DMA descriptor could have multiple format, which are
> +        * distinguished by the opcode field.
> +        */
> +       RTE_DMA_STATUS_INVALID_OPCODE,
> +       /** The operation failed to complete due bus err */
> +       RTE_DMA_STATUS_BUS_ERROR,
> +       /** The operation failed to complete due data poison */
> +       RTE_DMA_STATUS_DATA_POISION,
> +       /** The operation failed to complete due descriptor read error */
> +       RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
> +       /** The operation failed to complete due device link error
> +        * Used to indicates that the link error in the mem-to-dev/dev-to-mem/
> +        * dev-to-dev transfer scenario.
> +        */
> +       RTE_DMA_STATUS_DEV_LINK_ERROR,
> +       /** Driver specific status code offset
> +        * Start status code for the driver to define its own error code.


Add RTE_DMA_STATUS_UNKNOWN for the ones which are not covered by the public
API spec.


> +        */
> +       RTE_DMA_STATUS_DRV_SPECIFIC_OFFSET = 0x10000,
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that failed to complete.
> + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_status
> + *   Indicates the size of status array.
> + * @param[out] status
> + *   The error code of operations that failed to complete.
> + *   Some standard error code are described in 'enum rte_dma_status_code'
> + *   @see rte_dma_status_code
> + * @param[out] last_idx
> + *   The last failed completed operation's index.
> + *
> + * @return
> + *   The number of operations that failed to complete.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
> +                          const uint16_t nb_status, uint32_t *status,

uint32_t -> enum rte_dma_status_code
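
For context, the expected draining pattern on the application side (a sketch
against the v2 API, with the status array typed per the suggestion above):

    uint16_t last_idx, fail_idx;
    bool has_error;
    enum rte_dma_status_code status[8];

    uint16_t n = rte_dmadev_completed(dev_id, vchan, 32, &last_idx,
                                      &has_error);
    /* ... release the n successfully completed jobs up to last_idx ... */
    if (has_error) {
        uint16_t nf = rte_dmadev_completed_fails(dev_id, vchan, 8,
                                                 status, &fail_idx);
        /* inspect status[0..nf-1] to decide whether to retry or drop */
    }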


> +                          uint16_t *last_idx)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(status, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(last_idx, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_fails, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +       if (nb_status == 0) {
> +               RTE_DMADEV_LOG(ERR, "Invalid nb_status\n");
> +               return -EINVAL;
> +       }
> +#endif
> +       return (*dev->completed_fails)(dev, vchan, nb_status, status, last_idx);
> +}
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_H_ */
> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> new file mode 100644
> index 0000000..410faf0
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -0,0 +1,159 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#ifndef _RTE_DMADEV_CORE_H_
> +#define _RTE_DMADEV_CORE_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device internal header.
> + *
> + * This header contains internal data types, that are used by the DMA devices
> + * in order to expose their ops to the class.
> + *
> + * Applications should not use these API directly.
> + *
> + */
> +
> +struct rte_dmadev;
> +
> +/** @internal Used to get device information of a device. */
> +typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
> +                                struct rte_dmadev_info *dev_info);

Please change to rte_dmadev_info_get_t to avoid conflicts due to namespace
issues, as this header is exported.

> +
> +/** @internal Used to configure a device. */
> +typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
> +                                 const struct rte_dmadev_conf *dev_conf);
> +
> +/** @internal Used to start a configured device. */
> +typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to stop a configured device. */
> +typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to close a configured device. */
> +typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to reset a configured device. */
> +typedef int (*dmadev_reset_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to allocate and set up a virtual DMA channel. */
> +typedef int (*dmadev_vchan_setup_t)(struct rte_dmadev *dev,
> +                                   const struct rte_dmadev_vchan_conf *conf);
> +
> +/** @internal Used to release a virtual DMA channel. */
> +typedef int (*dmadev_vchan_release_t)(struct rte_dmadev *dev, uint16_t vchan);
> +
> +/** @internal Used to retrieve basic statistics. */
> +typedef int (*dmadev_stats_get_t)(struct rte_dmadev *dev, int vchan,
> +                                 struct rte_dmadev_stats *stats);
> +
> +/** @internal Used to reset basic statistics. */
> +typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, int vchan);
> +
> +/** @internal Used to dump internal information. */
> +typedef int (*dmadev_dump_t)(struct rte_dmadev *dev, FILE *f);
> +
> +/** @internal Used to start dmadev selftest. */
> +typedef int (*dmadev_selftest_t)(uint16_t dev_id);
> +
> +/** @internal Used to enqueue a copy operation. */
> +typedef int (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                            rte_iova_t src, rte_iova_t dst,
> +                            uint32_t length, uint64_t flags);
> +
> +/** @internal Used to enqueue a scatter list copy operation. */
> +typedef int (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                               const struct rte_dma_sg *sg,
> +                               uint32_t sg_len, uint64_t flags);
> +
> +/** @internal Used to enqueue a fill operation. */
> +typedef int (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                            uint64_t pattern, rte_iova_t dst,
> +                            uint32_t length, uint64_t flags);
> +
> +/** @internal Used to enqueue a scatter list fill operation. */
> +typedef int (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                       uint64_t pattern, const struct rte_dma_sg *sg,
> +                       uint32_t sg_len, uint64_t flags);
> +
> +/** @internal Used to trigger hardware to begin working. */
> +typedef int (*dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
> +
> +/** @internal Used to return number of successful completed operations. */
> +typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                                      const uint16_t nb_cpls,
> +                                      uint16_t *last_idx, bool *has_error);
> +
> +/** @internal Used to return number of failed completed operations. */
> +typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
> +                       uint16_t vchan, const uint16_t nb_status,
> +                       uint32_t *status, uint16_t *last_idx);
> +
> +/**
> + * DMA device operations function pointer table
> + */
> +struct rte_dmadev_ops {
> +       dmadev_info_get_t dev_info_get;
> +       dmadev_configure_t dev_configure;
> +       dmadev_start_t dev_start;
> +       dmadev_stop_t dev_stop;
> +       dmadev_close_t dev_close;
> +       dmadev_reset_t dev_reset;
> +       dmadev_vchan_setup_t vchan_setup;
> +       dmadev_vchan_release_t vchan_release;
> +       dmadev_stats_get_t stats_get;
> +       dmadev_stats_reset_t stats_reset;
> +       dmadev_dump_t dev_dump;
> +       dmadev_selftest_t dev_selftest;
> +};
> +
> +/**
> + * @internal
> + * The data part, with no function pointers, associated with each DMA device.
> + *
> + * This structure is safe to place in shared memory to be common among different
> + * processes in a multi-process configuration.
> + */
> +struct rte_dmadev_data {
> +       uint16_t dev_id; /**< Device [external] identifier. */
> +       char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
> +       void *dev_private; /**< PMD-specific private data. */
> +       struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
> +       uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
> +       uint64_t reserved[4]; /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +/**
> + * @internal
> + * The generic data structure associated with each DMA device.
> + *
> + * The dataplane APIs are located at the beginning of the structure, along
> + * with the pointer to where all the data elements for the particular device
> + * are stored in shared memory. This split scheme allows the function pointer
> + * and driver data to be per-process, while the actual configuration data for
> + * the device is shared.
> + */
> +struct rte_dmadev {
> +       dmadev_copy_t copy;
> +       dmadev_copy_sg_t copy_sg;
> +       dmadev_fill_t fill;
> +       dmadev_fill_sg_t fill_sg;
> +       dmadev_submit_t submit;
> +       dmadev_completed_t completed;

We could add reserved fields here for any future fastpath additions.

> +       dmadev_completed_fails_t completed_fails;


> +       const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
> +       /** Flag indicating the device is attached: ATTACHED(1)/DETACHED(0). */
> +       uint8_t attached : 1;
> +       /** Device info which supplied during device initialization. */
> +       struct rte_device *device;
> +       struct rte_dmadev_data *data; /**< Pointer to device data. */
> +       uint64_t reserved[4]; /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +extern struct rte_dmadev rte_dmadevices[];
> +
> +#endif /* _RTE_DMADEV_CORE_H_ */
> diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
> new file mode 100644
> index 0000000..45141f9
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_pmd.h
> @@ -0,0 +1,72 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_PMD_H_
> +#define _RTE_DMADEV_PMD_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device PMD APIs
> + *
> + * Driver facing APIs for a DMA device. These are not to be called directly by
> + * any application.
> + */
> +
> +#include "rte_dmadev.h"
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * @internal
> + * Allocates a new dmadev slot for an DMA device and returns the pointer
> + * to that slot for the driver to use.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   A pointer to the DMA device slot case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name);
> +
> +/**
> + * @internal
> + * Release the specified dmadev.
> + *
> + * @param dev
> + *   Device to be released.
> + *
> + * @return
> + *   - 0 on success, negative on error
> + */
> +__rte_internal
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev);
> +
> +/**
> + * @internal
> + * Return the DMA device based on the device name.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   A pointer to the DMA device slot case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dmadev *
> +rte_dmadev_get_device_by_name(const char *name);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_PMD_H_ */
> diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
> new file mode 100644
> index 0000000..0f099e7
> --- /dev/null
> +++ b/lib/dmadev/version.map
> @@ -0,0 +1,40 @@
> +EXPERIMENTAL {
> +       global:
> +
> +       rte_dmadev_count;
> +       rte_dmadev_info_get;
> +       rte_dmadev_configure;
> +       rte_dmadev_start;
> +       rte_dmadev_stop;
> +       rte_dmadev_close;
> +       rte_dmadev_reset;
> +       rte_dmadev_vchan_setup;
> +       rte_dmadev_vchan_release;
> +       rte_dmadev_stats_get;
> +       rte_dmadev_stats_reset;
> +       rte_dmadev_dump;
> +       rte_dmadev_selftest;
> +       rte_dmadev_copy;
> +       rte_dmadev_copy_sg;
> +       rte_dmadev_fill;
> +       rte_dmadev_fill_sg;
> +       rte_dmadev_submit;
> +       rte_dmadev_completed;
> +       rte_dmadev_completed_fails;
> +
> +       local: *;
> +};
> +
> +INTERNAL {
> +        global:
> +
> +       rte_dmadevices;
> +       rte_dmadev_pmd_allocate;
> +       rte_dmadev_pmd_release;
> +       rte_dmadev_get_device_by_name;
> +
> +       local:
> +
> +       rte_dmadev_is_valid_dev;
> +};
> +
> diff --git a/lib/meson.build b/lib/meson.build
> index 1673ca4..68d239f 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -60,6 +60,7 @@ libraries = [
>          'bpf',
>          'graph',
>          'node',
> +        'dmadev',
>  ]
>
>  if is_windows
> --
> 2.8.1
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-11  7:14                       ` Jerin Jacob
@ 2021-07-12  7:01                         ` Morten Brørup
  2021-07-12  7:59                           ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Morten Brørup @ 2021-07-12  7:01 UTC (permalink / raw)
  To: Jerin Jacob, Bruce Richardson
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Nipun Gupta, Hemant Agrawal, Maxime Coquelin,
	Honnappa Nagarahalli, David Marchand, Satananda Burla,
	Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Jerin Jacob

> Probably we can remove rte_dmadev_fill_sg() variant and keep sg only
> for copy to save 8B.

Perhaps the scatter/gather functions can be on a separate cache line, following the cache line with the simple functions?
Of course, this is only beneficial if the SG functions are not used with simple functions.
This means that we reserve space for 8 simple functions and 8 SG functions.

And if one or two functions are used with both simple and SG functions, it/they can be present in both cache lines. (This is somewhat dirty, but would be a typical implementation for a DPDK flow data structure.)
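
Concretely, the split could look like this (a sketch; the field and typedef
names are from the v2 patch, while the cache-line split and the reserved
slot counts are the hypothetical part):

    struct rte_dmadev {
        /* cache line 0: simple fastpath ops, with room to grow to 8 */
        dmadev_copy_t copy;
        dmadev_fill_t fill;
        dmadev_submit_t submit;
        dmadev_completed_t completed;
        void *simple_reserved[4];
        /* cache line 1: scatter/gather fastpath ops, with room to grow */
        dmadev_copy_sg_t copy_sg;
        dmadev_fill_sg_t fill_sg;
        void *sg_reserved[6];
        /* ... slow-path pointers and data follow ... */
    } __rte_cache_aligned;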

-Morten

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
  2021-07-11  9:42   ` fengchengwen
  2021-07-11 14:25   ` Jerin Jacob
@ 2021-07-12  7:15   ` Morten Brørup
  2021-07-12  9:59   ` Jerin Jacob
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Morten Brørup @ 2021-07-12  7:15 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk
  Cc: dev, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, liangma

> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Chengwen Feng
> 
> This patch introduces 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of the dmadev library expose some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

[snip]

> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..8779512
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,1030 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + * Copyright(c) 2021 Marvell International Ltd.

If the group of DMA device hardware vendors does not oppose, I would appreciate it if my contribution to the definition of the DMA device API were recognized in this file:

+ * Copyright(c) 2021 SmartShare Systems.

> + */
> +

Also remember the other contributors in the other files, where appropriate.

-Morten

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11 13:34     ` Jerin Jacob
@ 2021-07-12  7:40       ` Morten Brørup
  0 siblings, 0 replies; 339+ messages in thread
From: Morten Brørup @ 2021-07-12  7:40 UTC (permalink / raw)
  To: Jerin Jacob, fengchengwen
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	dpdk-dev, Nipun Gupta, Hemant Agrawal, Maxime Coquelin,
	Honnappa Nagarahalli, David Marchand, Satananda Burla,
	Prasun Kapoor, Ananyev, Konstantin, liangma

> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Jerin Jacob
> 
> On Sun, Jul 11, 2021 at 3:12 PM fengchengwen <fengchengwen@huawei.com>
> wrote:
> >
> > Note:
> > 1) This patch keeps the dmadev <> vchan layering; I think the vchan can
> >    be cleanly separated, conceptually, from the hw-channel.
> 
> I would like to keep it as "channel" instead of "virtual channel", as the
> latter is implementation-specific.
> No strong opinion on this, though. @Richardson, Bruce, @Morten Brørup: thoughts?

Consider using "context" or "ctx" instead. It might help avoid it being mistaken for a DMA hardware feature.

No strong opinion, though. Whatever fits nicely in the documentation is a good choice.

A small anecdote: We once had to name an important parameter in our product, and it came down to the choice of two perfectly good names. We chose the shortest name, only because it fit better into the GUI.

> 
> > 2) I could not understand struct dpi_dma_queue_ctx_s, so in this patch
> >    I defined rte_dma_slave_port_parameters with reference to the
> >    Kunpeng DMA implementation.

One more thing:

The DPDK project is aiming to use inclusive code language. (Ref: https://www.dpdk.org/blog/2020/07/22/dpdk-governing-board-update-july-2020/)

Please replace the word "slave" with something politically correct.

-Morten


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH] dmadev: introduce DMA device library
  2021-07-12  7:01                         ` Morten Brørup
@ 2021-07-12  7:59                           ` Jerin Jacob
  0 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-12  7:59 UTC (permalink / raw)
  To: Morten Brørup
  Cc: Bruce Richardson, fengchengwen, Thomas Monjalon, Ferruh Yigit,
	Jerin Jacob, dpdk-dev, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma,
	Radha Mohan Chintakuntla

On Mon, Jul 12, 2021 at 12:31 PM Morten Brørup <mb@smartsharesystems.com> wrote:
>
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Jerin Jacob
>
> > Probably we can remove rte_dmadev_fill_sg() variant and keep sg only
> > for copy to save 8B.
>
> Perhaps the scatter/gather functions can be on a separate cache line, following the cache line with the simple functions?
> Of course, this is only beneficial if the SG functions are not used with simple functions.
> This means that we reserve space for 8 simple functions and 8 SG functions.

Currently, there are only two SG and normal ops functions. IMO, we should
keep all fastpath functions in the same CL irrespective of whether they are
simple or not.
My suggestion was more that, if there is no HW support for
rte_dmadev_fill_sg(), we can remove it from the spec.

>
> And if one or two functions are used with both simple and SG functions, it/they can be present in both cache lines. (This is somewhat dirty, but would be a typical implementation for a DPDK flow data structure.)
>
> -Morten
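
For illustration, a minimal sketch of the cache-line split being discussed;
the typedef and struct names below are invented for this example and are
not part of the patch:

#include <stdint.h>

typedef int (*dma_copy_t)(void *dev_priv, uint64_t src, uint64_t dst,
			  uint32_t len, uint64_t flags);
typedef int (*dma_copy_sg_t)(void *dev_priv, const void *sg, uint16_t nb_sg,
			     uint64_t flags);

/* Simple ops grouped on the first 64B cache line, scatter-gather ops
 * on the second, so an application using only the simple path never
 * touches the second line. Up to 8 pointers fit per 64B line.
 */
struct dma_fastpath {
	/* cache line 0: simple operations */
	dma_copy_t copy;
	dma_copy_t fill;	/* same shape here only for the sketch */
	uint8_t pad0[64 - 2 * sizeof(dma_copy_t)];
	/* cache line 1: scatter-gather operations */
	dma_copy_sg_t copy_sg;
	dma_copy_sg_t fill_sg;
} __attribute__((aligned(64)));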

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-07-12  7:15   ` Morten Brørup
@ 2021-07-12  9:59   ` Jerin Jacob
  2021-07-12 13:32     ` Bruce Richardson
  2021-07-12 12:05   ` Bruce Richardson
                     ` (2 subsequent siblings)
  6 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-12  9:59 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma

On Sun, Jul 11, 2021 at 2:59 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
>
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the virtual DMA channel.
> + *
> + * This queues up a fill operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   An flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.

fill job

> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +               rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +#endif
> +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> +}
> +

> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.

This means the driver will be tracking the last index.

Does that mean the application needs to call this API periodically to
consume the completion slots?
I.e. up to 64K (UINT16_MAX) outstanding jobs are possible. If the
application fails to call this before more than 64K jobs are outstanding,
then subsequent enqueues will fail.

If so, we need to document this.

One of the concerns with keeping UINT16_MAX as the limit is that the
completion memory will not always be in cache.
On the other hand, if we make this size programmable, it may introduce
complexity in the application.

Thoughts?


> + * @param[out] has_error
> + *   Indicates if there are transfer error.
> + *   If not required, NULL can be passed in.
> + *
> + * @return
> + *   The number of operations that successfully completed.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> +                    uint16_t *last_idx, bool *has_error)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +       bool err;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, -ENOTSUP);
> +       if (vchan >= dev->data->dev_conf.max_vchans) {
> +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +               return -EINVAL;
> +       }
> +       if (nb_cpls == 0) {
> +               RTE_DMADEV_LOG(ERR, "Invalid nb_cpls\n");
> +               return -EINVAL;
> +       }
> +#endif
> +
> +       /* Ensure the pointer values are non-null to simplify drivers.
> +        * In most cases these should be compile time evaluated, since this is
> +        * an inline function.
> +        * - If NULL is explicitly passed as parameter, then compiler knows the
> +        *   value is NULL
> +        * - If address of local variable is passed as parameter, then compiler
> +        *   can know it's non-NULL.
> +        */
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +       if (has_error == NULL)
> +               has_error = &err;
> +
> +       *has_error = false;
> +       return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> +}
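
To make the enqueue/doorbell/poll lifecycle concrete, a hedged usage sketch
combining the two quoted calls; the rte_dmadev_perform() doorbell and its
parameters are assumed from the library overview and may differ:

#include <stdbool.h>
#include <rte_dmadev.h>

static int
fill_and_wait(uint16_t dev_id, uint16_t vchan, rte_iova_t dst, uint32_t len)
{
	uint16_t last_idx, done;
	bool has_error = false;
	int idx;

	/* enqueue a fill job; on success the ring index is returned */
	idx = rte_dmadev_fill(dev_id, vchan, 0xa5a5a5a5a5a5a5a5ull,
			      dst, len, 0);
	if (idx < 0)
		return idx;

	/* assumed doorbell call to start hardware processing */
	rte_dmadev_perform(dev_id, vchan);

	/* poll for completion of the single outstanding job */
	do {
		done = rte_dmadev_completed(dev_id, vchan, 1,
					    &last_idx, &has_error);
	} while (done == 0 && !has_error);

	return has_error ? -1 : 0;
}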

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-07-12  9:59   ` Jerin Jacob
@ 2021-07-12 12:05   ` Bruce Richardson
  2021-07-12 15:50   ` Bruce Richardson
  2021-07-13 14:19   ` Ananyev, Konstantin
  6 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-12 12:05 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, dev, mb, nipun.gupta,
	hemant.agrawal, maxime.coquelin, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev, liangma

On Sun, Jul 11, 2021 at 05:25:56PM +0800, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

Thanks for this V2.
Some initial (mostly minor) comments on the meson.build and dmadev .c file
below. I'll review the headers in a separate email.

/Bruce

> ---
>  MAINTAINERS                  |    4 +
>  config/rte_config.h          |    3 +
>  lib/dmadev/meson.build       |    6 +
>  lib/dmadev/rte_dmadev.c      |  560 +++++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h      | 1030 ++++++++++++++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h |  159 +++++++
>  lib/dmadev/rte_dmadev_pmd.h  |   72 +++
>  lib/dmadev/version.map       |   40 ++
>  lib/meson.build              |    1 +
>  9 files changed, 1875 insertions(+)
>  create mode 100644 lib/dmadev/meson.build
>  create mode 100644 lib/dmadev/rte_dmadev.c
>  create mode 100644 lib/dmadev/rte_dmadev.h
>  create mode 100644 lib/dmadev/rte_dmadev_core.h
>  create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>  create mode 100644 lib/dmadev/version.map
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 4347555..0595239 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
>  F: app/test/test_rawdev.c
>  F: doc/guides/prog_guide/rawdev.rst
>  
> +DMA device API - EXPERIMENTAL
> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/
> +
>  
>  Memory Pool Drivers
>  -------------------
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 590903c..331a431 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -81,6 +81,9 @@
>  /* rawdev defines */
>  #define RTE_RAWDEV_MAX_DEVS 64
>  
> +/* dmadev defines */
> +#define RTE_DMADEV_MAX_DEVS 64
> +
>  /* ip_fragmentation defines */
>  #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
>  #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
> diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
> new file mode 100644
> index 0000000..c918dae
> --- /dev/null
> +++ b/lib/dmadev/meson.build
> @@ -0,0 +1,6 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2021 HiSilicon Limited.
> +
> +sources = files('rte_dmadev.c')
> +headers = files('rte_dmadev.h', 'rte_dmadev_pmd.h')

If rte_dmadev_pmd.h is only for PMD use, then it should be in
"driver_sdk_headers".

> +indirect_headers += files('rte_dmadev_core.h')
> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> new file mode 100644
> index 0000000..8a29abb
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -0,0 +1,560 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#include <ctype.h>
> +#include <inttypes.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_eal.h>
> +#include <rte_errno.h>
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +#include <rte_memory.h>
> +#include <rte_memzone.h>
> +#include <rte_malloc.h>
> +#include <rte_string_fns.h>
> +
> +#include "rte_dmadev.h"
> +#include "rte_dmadev_pmd.h"
> +
> +RTE_LOG_REGISTER(rte_dmadev_logtype, lib.dmadev, INFO);
> +
> +struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
> +
> +static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
> +/* Shared memory between primary and secondary processes. */
> +static struct {
> +	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
> +} *dmadev_shared_data;
> +
> +static int
> +dmadev_check_name(const char *name)
> +{
> +	size_t name_len;
> +
> +	if (name == NULL) {
> +		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
> +		return -EINVAL;
> +	}
> +
> +	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
> +	if (name_len == 0) {
> +		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
> +		return -EINVAL;
> +	}
> +	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
> +		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static uint16_t
> +dmadev_find_free_dev(void)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
> +			RTE_ASSERT(rte_dmadevices[i].attached == 0);
> +			return i;
> +		}
> +	}
> +
> +	return RTE_DMADEV_MAX_DEVS;
> +}
> +
> +static struct rte_dmadev*
> +dmadev_allocated(const char *name)

The name implies a boolean lookup for whether a particular dmadev has been
allocated or not. Since this returns a pointer, I think a name like
"dmadev_find" or "dmadev_get" would be more appropriate.

> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if ((rte_dmadevices[i].attached == 1) &&
> +		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
> +			return &rte_dmadevices[i];
> +	}
> +
> +	return NULL;
> +}
> +
> +static int
> +dmadev_shared_data_prepare(void)
> +{
> +	const struct rte_memzone *mz;
> +
> +	if (dmadev_shared_data == NULL) {
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +			/* Allocate port data and ownership shared memory. */
> +			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
> +					 sizeof(*dmadev_shared_data),
> +					 rte_socket_id(), 0);
> +		} else {
> +			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
> +		}
> +		if (mz == NULL)
> +			return -ENOMEM;
> +
> +		dmadev_shared_data = mz->addr;
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +			memset(dmadev_shared_data->data, 0,
> +			       sizeof(dmadev_shared_data->data));
> +	}
> +
> +	return 0;
> +}
> +
> +static struct rte_dmadev *
> +dmadev_allocate(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +	uint16_t dev_id;
> +
> +	dev = dmadev_allocated(name);
> +	if (dev != NULL) {
> +		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
> +		return NULL;
> +	}
> +
> +	dev_id = dmadev_find_free_dev();
> +	if (dev_id == RTE_DMADEV_MAX_DEVS) {
> +		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
> +		return NULL;
> +	}
> +
> +	if (dmadev_shared_data_prepare() != 0) {
> +		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +		return NULL;
> +	}
> +
> +	dev = &rte_dmadevices[dev_id];
> +	dev->data = &dmadev_shared_data->data[dev_id];
> +	dev->data->dev_id = dev_id;
> +	strlcpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
> +
> +	return dev;
> +}
> +
> +static struct rte_dmadev *
> +dmadev_attach_secondary(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +	uint16_t i;
> +
> +	if (dmadev_shared_data_prepare() != 0) {
> +		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +		return NULL;
> +	}
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
> +			break;
> +	}
> +	if (i == RTE_DMADEV_MAX_DEVS) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %s is not driven by the primary process\n",
> +			name);
> +		return NULL;
> +	}
> +
> +	dev = &rte_dmadevices[i];
> +	dev->data = &dmadev_shared_data->data[i];
> +	RTE_ASSERT(dev->data->dev_id == i);
> +
> +	return dev;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +
> +	if (dmadev_check_name(name) != 0)
> +		return NULL;
> +
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +		dev = dmadev_allocate(name);
> +	else
> +		dev = dmadev_attach_secondary(name);
> +
> +	if (dev == NULL)
> +		return NULL;
> +	dev->attached = 1;
> +
> +	return dev;
> +}
> +
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev)
> +{
> +	if (dev == NULL)
> +		return -EINVAL;
> +
> +	if (dev->attached == 0)
> +		return 0;
> +
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +		rte_free(dev->data->dev_private);
> +		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
> +	}
> +
> +	memset(dev, 0, sizeof(struct rte_dmadev));
> +	dev->attached = 0;
> +
> +	return 0;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_get_device_by_name(const char *name)
> +{
> +	if (dmadev_check_name(name) != 0)
> +		return NULL;
> +	return dmadev_allocated(name);
> +}
> +
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id)
> +{
> +	if (dev_id >= RTE_DMADEV_MAX_DEVS ||
> +	    rte_dmadevices[dev_id].attached == 0)
> +		return false;
> +	return true;
> +}
> +
> +uint16_t
> +rte_dmadev_count(void)
> +{
> +	uint16_t count = 0;
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (rte_dmadevices[i].attached == 1)
> +			count++;
> +	}
> +
> +	return count;
> +}
> +
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
> +{
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
> +	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
> +	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info);
> +	if (ret != 0)
> +		return ret;
> +
> +	dev_info->device = dev->device;
> +
> +	return 0;
> +}

Should the info_get function (and the related info structure) not include
the parameters passed into the configure function? That way, the user
can query a previously set-up configuration. This should be done at the
dmadev level rather than the driver level, since I see the parameters are
already being saved in configure below.

Also, for ABI purposes, I would strongly suggest passing "sizeof(dev_info)"
to the driver in the "dev_info_get" call. When dev_info changes, we can
version rte_dmadev_info_get, but can't version the functions that it calls
in turn. When we add a new field to the struct, the driver functions that
choose to use that new field can check the size of the struct passed to
determine if it's safe to write that new field or not. [So long as the field
is added at the end, driver functions not updated for the new field need no
changes.]
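
As a sketch of that size-check pattern (the struct and field names here are
hypothetical, not the dmadev ABI):

#include <stddef.h>
#include <stdint.h>

struct dev_info {
	uint16_t max_vchans;	/* field present since the first version */
	uint16_t new_field;	/* field added later, at the end */
};

/* the driver callback receives the size of the struct the caller knows */
static int
pmd_info_get(struct dev_info *info, size_t info_sz)
{
	info->max_vchans = 4;
	/* write the new field only if the caller's struct contains it */
	if (info_sz >= offsetof(struct dev_info, new_field) +
			sizeof(info->new_field))
		info->new_field = 0;
	return 0;
}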

> +
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
> +{
> +	struct rte_dmadev_info info;
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(dev_conf, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (dev_conf->max_vchans > info.max_vchans) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u configure too many vchans\n", dev_id);

We allow up to 100 characters per line for DPDK code, so these don't need
to be wrapped so aggressively.

> +		return -EINVAL;
> +	}
> +	if (dev_conf->enable_mt_vchan &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MT_VCHAN)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support MT-safe vchan\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (dev_conf->enable_mt_multi_vchan &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support MT-safe multiple vchan\n",
> +			dev_id);
> +		return -EINVAL;
> +	}
> +
> +	if (dev->data->dev_started != 0) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u must be stopped to allow configuration\n",
> +			dev_id);
> +		return -EBUSY;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
> +	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
> +	if (ret == 0)
> +		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
> +
> +	return ret;
> +}
> +
> +int
> +rte_dmadev_start(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (dev->data->dev_started != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u already started\n", dev_id);

Maybe make this a warning rather than error.

> +		return 0;
> +	}
> +
> +	if (dev->dev_ops->dev_start == NULL)
> +		goto mark_started;
> +
> +	ret = (*dev->dev_ops->dev_start)(dev);
> +	if (ret != 0)
> +		return ret;
> +
> +mark_started:
> +	dev->data->dev_started = 1;
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_stop(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (dev->data->dev_started == 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u already stopped\n", dev_id);

As above, suggest just warning rather than error.

> +		return 0;
> +	}
> +
> +	if (dev->dev_ops->dev_stop == NULL)
> +		goto mark_stopped;
> +
> +	ret = (*dev->dev_ops->dev_stop)(dev);
> +	if (ret != 0)
> +		return ret;
> +
> +mark_stopped:
> +	dev->data->dev_started = 0;
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_close(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	/* Device must be stopped before it can be closed */
> +	if (dev->data->dev_started == 1) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u must be stopped before closing\n", dev_id);
> +		return -EBUSY;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
> +	return (*dev->dev_ops->dev_close)(dev);
> +}
> +
> +int
> +rte_dmadev_reset(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
> +	/* Reset is not dependent on state of the device */
> +	return (*dev->dev_ops->dev_reset)(dev);
> +}

I would tend to agree with the query as to whether this is needed or not.
Can we perhaps remove it for now, and add it back later if it does prove to
be needed? The less code to review and work with for the first version, the
better IMHO. :-)

> +
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +		       const struct rte_dmadev_vchan_conf *conf)
> +{
> +	struct rte_dmadev_info info;
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(conf, -EINVAL);

This is confusing, because you are actually doing a parameter check using a
macro named for checking a function. Better to explicitly just check conf
for null.

> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction == 0 ||
> +	    conf->direction & ~RTE_DMA_TRANSFER_DIR_ALL) {
> +		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
> +		return -EINVAL;
> +	}

I wonder, should we allow direction == 0 to be treated the same as all bits
set, or as all supported bits set?

> +	if (conf->direction & RTE_DMA_MEM_TO_MEM &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support mem2mem transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction & RTE_DMA_MEM_TO_DEV &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_DEV)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support mem2dev transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction & RTE_DMA_DEV_TO_MEM &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_MEM)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support dev2mem transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction & RTE_DMA_DEV_TO_DEV &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_DEV)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support dev2dev transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u number of descriptors invalid\n", dev_id);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
> +	return (*dev->dev_ops->vchan_setup)(dev, conf);
> +}
> +
> +int
> +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range\n", dev_id, vchan);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_release, -ENOTSUP);
> +	return (*dev->dev_ops->vchan_release)(dev, vchan);
> +}
> +
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, int vchan, struct rte_dmadev_stats *stats)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(stats, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (vchan < -1 || vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range\n", dev_id, vchan);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
> +	return (*dev->dev_ops->stats_get)(dev, vchan, stats);
> +}
> +
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, int vchan)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (vchan < -1 || vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range\n", dev_id, vchan);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
> +	return (*dev->dev_ops->stats_reset)(dev, vchan);
> +}
> +
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f)
> +{
> +	struct rte_dmadev_info info;
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(f, -EINVAL);
> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
> +		dev->data->dev_id,
> +		dev->data->dev_name,
> +		dev->data->dev_started ? "started" : "stopped");
> +	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
> +	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
> +	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
> +	fprintf(f, "  MT-safe-configured: vchans: %u multi-vchans: %u\n",
> +		dev->data->dev_conf.enable_mt_vchan,
> +		dev->data->dev_conf.enable_mt_multi_vchan);
> +
> +	if (dev->dev_ops->dev_dump != NULL)
> +		return (*dev->dev_ops->dev_dump)(dev, f);
> +
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_selftest(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
> +	return (*dev->dev_ops->dev_selftest)(dev_id);
> +}

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-12  9:59   ` Jerin Jacob
@ 2021-07-12 13:32     ` Bruce Richardson
  2021-07-12 16:34       ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-12 13:32 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma

On Mon, Jul 12, 2021 at 03:29:27PM +0530, Jerin Jacob wrote:
> On Sun, Jul 11, 2021 at 2:59 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> >
> > This patch introduce 'dmadevice' which is a generic type of DMA
> > device.
> >
> > The APIs of dmadev library exposes some generic operations which can
> > enable configuration and I/O with the DMA devices.
> >
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > ---
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Enqueue a fill operation onto the virtual DMA channel.
> > + *
> > + * This queues up a fill operation to be performed by hardware, but does not
> > + * trigger hardware to begin that operation.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param pattern
> > + *   The pattern to populate the destination buffer with.
> > + * @param dst
> > + *   The address of the destination buffer.
> > + * @param length
> > + *   The length of the destination buffer.
> > + * @param flags
> > + *   An flags for this operation.
> > + *
> > + * @return
> > + *   - 0..UINT16_MAX: index of enqueued copy job.
> 
> fill job
> 
> > + *   - <0: Error code returned by the driver copy function.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> > +               rte_iova_t dst, uint32_t length, uint64_t flags)
> > +{
> > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +#ifdef RTE_DMADEV_DEBUG
> > +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> > +       if (vchan >= dev->data->dev_conf.max_vchans) {
> > +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > +               return -EINVAL;
> > +       }
> > +#endif
> > +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> > +}
> > +
> 
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Returns the number of operations that have been successfully completed.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param nb_cpls
> > + *   The maximum number of completed operations that can be processed.
> > + * @param[out] last_idx
> > + *   The last completed operation's index.
> > + *   If not required, NULL can be passed in.
> 
> This means the driver will be tracking the last index.
> 

Driver will be doing this anyway, no, since it needs to ensure we don't
wrap around?

> Is that mean, the application needs to call this API periodically to
> consume the completion slot.
> I.e up to 64K (UINT16_MAX)  outstanding jobs are possible. If the
> application fails to call this
> >64K outstand job then the subsequence enqueue will fail.

Well, given that there will be a regular enqueue ring which will probably
be <= 64k in size, the completion call will need to be called frequently
anyway. I don't think we need to document this restriction as it's fairly
understood that you can't go beyond the size of the ring without cleanup.

> 
> If so, we need to document this.
> 
> One of the concerns of keeping UINT16_MAX as the limit is the
> completion memory will always not in cache.
> On the other hand, if we make this size programmable. it may introduce
> complexity in the application.
> 
> Thoughts?

The reason for using powers-of-2 sizes, e.g. 0 .. UINT16_MAX, is that the
ring can be any other power-of-2 size and we can index it just by masking.
In the sample app for dmadev, I expect the ring size used to be set the
same as the dmadev enqueue ring size, for simplicity.

In fact, I was thinking that in later versions we may be able to include
some macros to help make this whole process easier, of converting indexes
to arbitrary data structures. [The reason for using macros is so that the
actual rings we are indexing can be of user-defined type, rather than just
a ring of pointers].
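
A minimal sketch of that masking pattern on the application side; the ring
size and metadata type are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <rte_dmadev.h>

#define APP_RING_SZ 1024	/* any power of 2, up to 64K */

struct job_meta {
	void *ctx;		/* app-defined per-job data */
};
static struct job_meta meta[APP_RING_SZ];
static uint16_t next_unconsumed;	/* free-running 16-bit index */

static void
consume_completions(uint16_t dev_id, uint16_t vchan)
{
	uint16_t last_idx, n, i;
	bool has_error;

	n = rte_dmadev_completed(dev_id, vchan, APP_RING_SZ,
				 &last_idx, &has_error);
	for (i = 0; i < n; i++) {
		/* 16-bit wrap-around is harmless: masking picks the slot */
		struct job_meta *m =
			&meta[next_unconsumed++ & (APP_RING_SZ - 1)];
		(void)m;	/* hand m->ctx back to the application */
	}
}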

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-07-12 12:05   ` Bruce Richardson
@ 2021-07-12 15:50   ` Bruce Richardson
  2021-07-13  9:07     ` Jerin Jacob
  2021-07-13 14:19   ` Ananyev, Konstantin
  6 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-12 15:50 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, dev, mb, nipun.gupta,
	hemant.agrawal, maxime.coquelin, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev, liangma

On Sun, Jul 11, 2021 at 05:25:56PM +0800, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

Hi again,

some further review comments inline.

/Bruce

> ---
>  MAINTAINERS                  |    4 +
>  config/rte_config.h          |    3 +
>  lib/dmadev/meson.build       |    6 +
>  lib/dmadev/rte_dmadev.c      |  560 +++++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h      | 1030 ++++++++++++++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h |  159 +++++++
>  lib/dmadev/rte_dmadev_pmd.h  |   72 +++
>  lib/dmadev/version.map       |   40 ++
>  lib/meson.build              |    1 +

<snip>

> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..8779512
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,1030 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + * Copyright(c) 2021 Marvell International Ltd.
> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The DMA framework is built on the following model:
> + *
> + *     ---------------   ---------------       ---------------
> + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> + *     | channel     |   | channel     |       | channel     |
> + *     ---------------   ---------------       ---------------
> + *            |                |                      |
> + *            ------------------                      |
> + *                     |                              |
> + *               ------------                    ------------
> + *               |  dmadev  |                    |  dmadev  |
> + *               ------------                    ------------
> + *                     |                              |
> + *            ------------------               ------------------
> + *            | HW-DMA-channel |               | HW-DMA-channel |
> + *            ------------------               ------------------
> + *                     |                              |
> + *                     --------------------------------
> + *                                     |
> + *                           ---------------------
> + *                           | HW-DMA-Controller |
> + *                           ---------------------
> + *
> + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
> + * each HW-DMA-channel should be represented by a dmadev.
> + *
> + * The dmadev could create multiple virtual DMA channel, each virtual DMA
> + * channel represents a different transfer context. The DMA operation request
> + * must be submitted to the virtual DMA channel.
> + * E.G. Application could create virtual DMA channel 0 for mem-to-mem transfer
> + *      scenario, and create virtual DMA channel 1 for mem-to-dev transfer
> + *      scenario.
> + *
> + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
> + * PCI/SoC device probing phase performed at EAL initialization time. And could
> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> + * phase.
> + *
> + * We use 'uint16_t dev_id' as the device identifier of a dmadev, and
> + * 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> + *
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_vchan_setup()
> + *     - rte_dmadev_start()
> + *
> + * Then, the application can invoke dataplane APIs to process jobs.
> + *
> + * If the application wants to change the configuration (i.e. call
> + * rte_dmadev_configure()), it must call rte_dmadev_stop() first to stop the
> + * device and then do the reconfiguration before calling rte_dmadev_start()
> + * again. The dataplane APIs should not be invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the
> + * rte_dmadev_close() function.
> + *
> + * The dataplane APIs include two parts:
> + *   a) The first part is the submission of operation requests:
> + *        - rte_dmadev_copy()
> + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> + *        - rte_dmadev_fill()
> + *        - rte_dmadev_fill_sg() - scatter-gather form of fill
> + *        - rte_dmadev_perform() - issue doorbell to hardware
> + *      These APIs could work with different virtual DMA channels which have
> + *      different contexts.
> + *      The first four APIs are used to submit the operation request to the
> + *      virtual DMA channel, if the submission is successful, a uint16_t
> + *      ring_idx is returned, otherwise a negative number is returned.
> + *   b) The second part is to obtain the result of requests:
> + *        - rte_dmadev_completed()
> + *            - return the number of operation requests completed successfully.
> + *        - rte_dmadev_completed_fails()
> + *            - return the number of operation requests failed to complete.

Please rename this to "completed_status" to allow the return of information
other than just errors. As I suggested before, I think this should also be
usable as a slower version of "completed" even in the case where there are
no errors, in that it returns status information for each and every job
rather than just returning as soon as it hits a failure.
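
Stepping back to the setup order described in the quoted header comment
(configure, then vchan_setup, then start), a minimal sketch of that
sequence; the descriptor count and direction are illustrative values:

#include <rte_dmadev.h>

static int
setup_dmadev(uint16_t dev_id)
{
	struct rte_dmadev_conf conf = { .max_vchans = 1 };
	struct rte_dmadev_vchan_conf vconf = {
		.direction = RTE_DMA_MEM_TO_MEM,
		.nb_desc = 1024,
	};
	int vchan, ret;

	ret = rte_dmadev_configure(dev_id, &conf);
	if (ret != 0)
		return ret;
	vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
	if (vchan < 0)		/* on success this is the channel id */
		return vchan;
	return rte_dmadev_start(dev_id);
}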

> + *
> + * About the ring_idx which rte_dmadev_copy/copy_sg/fill/fill_sg() returned,
> + * the rules are as follows:
> + *   a) ring_idx for each virtual DMA channel are independent.
> + *   b) For a virtual DMA channel, the ring_idx is monotonically incremented,
> + *      when it reach UINT16_MAX, it wraps back to zero.

Based on other feedback, I suggest we put in the detail here that: "This
index can be used by applications to track per-job metadata in an
application-defined circular ring, where the ring is a power-of-2 size, and
the indexes are masked appropriately."
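
For instance, the enqueue side of that per-job-metadata pattern could look
like the following; the ring size, metadata type, and helper name are
illustrative:

#include <rte_dmadev.h>

#define APP_RING_SZ 4096	/* any power of 2 */

struct job_meta { void *ctx; };
static struct job_meta meta[APP_RING_SZ];

static int
enqueue_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
	     uint32_t len, void *ctx)
{
	int idx = rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
	if (idx < 0)
		return idx;
	/* store app metadata at the slot chosen by masking the index */
	meta[idx & (APP_RING_SZ - 1)].ctx = ctx;
	return 0;
}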

> + *   c) The initial ring_idx of a virtual DMA channel is zero, after the device
> + *      is stopped or reset, the ring_idx needs to be reset to zero.
> + *   Example:
> + *      step-1: start one dmadev
> + *      step-2: enqueue a copy operation, the ring_idx return is 0
> + *      step-3: enqueue a copy operation again, the ring_idx return is 1
> + *      ...
> + *      step-101: stop the dmadev
> + *      step-102: start the dmadev
> + *      step-103: enqueue a copy operation, the cookie return is 0
> + *      ...
> + *      step-x+0: enqueue a fill operation, the ring_idx return is 65535
> + *      step-x+1: enqueue a copy operation, the ring_idx return is 0
> + *      ...
> + *
> + * By default, all the non-dataplane functions of the dmadev API exported by a
> + * PMD are lock-free functions which assume to not be invoked in parallel on
> + * different logical cores to work on the same target object.
> + *
> + * The dataplane functions of the dmadev API exported by a PMD can be MT-safe
> + * only when supported by the driver, generally, the driver will reports two
> + * capabilities:
> + *   a) Whether to support MT-safe for the submit/completion API of the same
> + *      virtual DMA channel.
> + *      E.G. one thread do submit operation, another thread do completion
> + *           operation.
> + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_VCHAN.
> + *      If driver don't support it, it's up to the application to guarantee
> + *      MT-safe.
> + *   b) Whether to support MT-safe for different virtual DMA channels.
> + *      E.G. one thread do operation on virtual DMA channel 0, another thread
> + *           do operation on virtual DMA channel 1.
> + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN.
> + *      If driver don't support it, it's up to the application to guarantee
> + *      MT-safe.
> + *
> + */

Just to check - do we have hardware that currently supports these
capabilities? For Intel HW, we will only support one virtual channel per
device without any MT-safety guarantees, so won't be setting either of
these flags. If any of these flags are unused in all planned drivers, we
should drop them from the spec until they prove necessary. Ideally,
everything in the dmadev definition should be testable, and features unused
by anyone obviously will be untested.

> +
> +#include <rte_common.h>
> +#include <rte_compat.h>
> +#include <rte_errno.h>
> +#include <rte_memory.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
> +
> +extern int rte_dmadev_logtype;
> +
> +#define RTE_DMADEV_LOG(level, ...) \
> +	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
> +
> +/* Macros to check for valid port */
> +#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
> +	if (!rte_dmadev_is_valid_dev(dev_id)) { \
> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> +		return retval; \
> +	} \
> +} while (0)
> +
> +#define RTE_DMADEV_VALID_DEV_ID_OR_RET(dev_id) do { \
> +	if (!rte_dmadev_is_valid_dev(dev_id)) { \
> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> +		return; \
> +	} \
> +} while (0)
> +

Can we avoid using these in the inline functions in this file, and move
them to the _pmd.h which is for internal PMD use only? It would mean we
don't get logging from the key dataplane functions, but I would hope the
return values would provide enough info.

Alternatively, can we keep the logtype definition and first macro and move
the other two to the _pmd.h file?

> +/**
> + * @internal
> + * Validate if the DMA device index is a valid attached DMA device.
> + *
> + * @param dev_id
> + *   DMA device index.
> + *
> + * @return
> + *   - If the device index is valid (true) or not (false).
> + */
> +__rte_internal
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> +
> +/**
> + * rte_dma_sg - can hold scatter DMA operation request
> + */
> +struct rte_dma_sg {
> +	rte_iova_t src;
> +	rte_iova_t dst;
> +	uint32_t length;
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the total number of DMA devices that have been successfully
> + * initialised.
> + *
> + * @return
> + *   The total number of usable DMA devices.
> + */
> +__rte_experimental
> +uint16_t
> +rte_dmadev_count(void);
> +
> +/**
> + * The capabilities of a DMA device
> + */
> +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
> +/**< DMA device support mem-to-mem transfer.

Do we need this? Can we assume that any device appearing as a dmadev can
do mem-to-mem copies, and drop the capability for mem-to-mem and the
capability for copying?

> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_MEM_TO_DEV	(1ull << 1)
> +/**< DMA device support slave mode & mem-to-dev transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_DEV_TO_MEM	(1ull << 2)
> +/**< DMA device support slave mode & dev-to-mem transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_DEV_TO_DEV	(1ull << 3)
> +/**< DMA device support slave mode & dev-to-dev transfer.
> + *

Just to confirm, are there devices currently planned for dmadev that
support only a subset of these flags? Thinking particularly of the
dev-2-mem and mem-2-dev ones here - do any of the devices we are
considering not support using device memory?
[Again, just want to ensure we aren't adding too much stuff that we don't
need yet]

> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_COPY	(1ull << 4)
> +/**< DMA device support copy ops.
> + *

Suggest dropping this and making it a minimum requirement for dmadev.

> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_FILL	(1ull << 5)
> +/**< DMA device support fill ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_SG		(1ull << 6)
> +/**< DMA device support scatter-list ops.
> + * If device support ops_copy and ops_sg, it means supporting copy_sg ops.
> + * If device support ops_fill and ops_sg, it means supporting fill_sg ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_FENCE		(1ull << 7)
> +/**< DMA device support fence.
> + * If device support fence, then application could set a fence flags when
> + * enqueue operation by rte_dma_copy/copy_sg/fill/fill_sg.
> + * If a operation has a fence flags, it means the operation must be processed
> + * only after all previous operations are completed.
> + *

Is this needed? As I understand it, the Marvell driver doesn't require
fences so providing one is a no-op. Therefore, this flag is probably
unnecessary.

> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_SVA		(1ull << 8)
> +/**< DMA device support SVA which could use VA as DMA address.
> + * If device support SVA then application could pass any VA address like memory
> + * from rte_malloc(), rte_memzone(), malloc, stack memory.
> + * If device don't support SVA, then application should pass IOVA address which
> + * from rte_malloc(), rte_memzone().
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_MT_VCHAN	(1ull << 9)
> +/**< DMA device support MT-safe of a virtual DMA channel.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN	(1ull << 10)
> +/**< DMA device support MT-safe of different virtual DMA channels.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */

As with comments above - let's check that these will actually be used
before we add them.

> +
> +/**
> + * A structure used to retrieve the contextual information of
> + * an DMA device
> + */
> +struct rte_dmadev_info {
> +	struct rte_device *device; /**< Generic Device information */
> +	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> +	/** Maximum number of virtual DMA channels supported */
> +	uint16_t max_vchans;
> +	/** Maximum allowed number of virtual DMA channel descriptors */
> +	uint16_t max_desc;
> +	/** Minimum allowed number of virtual DMA channel descriptors */
> +	uint16_t min_desc;
> +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured */
> +};

Let's add rte_dmadev_conf struct into this to return the configuration
settings.

> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve the contextual information of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param[out] dev_info
> + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> + *   contextual information of the device.
> + *
> + * @return
> + *   - =0: Success, driver updates the contextual information of the DMA device
> + *   - <0: Error code returned by the driver info get function.
> + *
> + */
> +__rte_experimental
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> +

Should have "const" on second param.

> +/**
> + * A structure used to configure a DMA device.
> + */
> +struct rte_dmadev_conf {
> +	/** Maximum number of virtual DMA channel to use.
> +	 * This value cannot be greater than the field 'max_vchans' of struct
> +	 * rte_dmadev_info which get from rte_dmadev_info_get().
> +	 */
> +	uint16_t max_vchans;
> +	/** Enable bit for MT-safe of a virtual DMA channel.
> +	 * This bit can be enabled only when the device supports
> +	 * RTE_DMA_DEV_CAPA_MT_VCHAN.
> +	 * @see RTE_DMA_DEV_CAPA_MT_VCHAN
> +	 */
> +	uint8_t enable_mt_vchan : 1;
> +	/** Enable bit for MT-safe of different virtual DMA channels.
> +	 * This bit can be enabled only when the device supports
> +	 * RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN.
> +	 * @see RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN
> +	 */
> +	uint8_t enable_mt_multi_vchan : 1;
> +	uint64_t reserved[2]; /**< Reserved for future fields */
> +};

Drop the reserved fields. ABI versioning is a better way to deal with
adding new fields.

> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Configure a DMA device.
> + *
> + * This function must be invoked first before any other function in the
> + * API. This function can also be re-invoked when a device is in the
> + * stopped state.
> + *
> + * @param dev_id
> + *   The identifier of the device to configure.
> + * @param dev_conf
> + *   The DMA device configuration structure encapsulated into rte_dmadev_conf
> + *   object.
> + *
> + * @return
> + *   - =0: Success, device configured.
> + *   - <0: Error code returned by the driver configuration function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Start a DMA device.
> + *
> + * The device start step is the last one and consists of setting the DMA
> + * to start accepting jobs.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device started.
> + *   - <0: Error code returned by the driver start function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_start(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Stop a DMA device.
> + *
> + * The device can be restarted with a call to rte_dmadev_start()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device stopped.
> + *   - <0: Error code returned by the driver stop function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stop(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Close a DMA device.
> + *
> + * The device cannot be restarted after this call.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *  - =0: Successfully close device
> + *  - <0: Failure to close device
> + */
> +__rte_experimental
> +int
> +rte_dmadev_close(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset a DMA device.
> + *
> + * This is different from cycle of rte_dmadev_start->rte_dmadev_stop in the
> + * sense similar to hard or soft reset.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Successfully reset device.
> + *   - <0: Failure to reset device.
> + *   - (-ENOTSUP): If the device doesn't support this function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_reset(uint16_t dev_id);
> +
> +/**
> + * DMA transfer direction defines.
> + */
> +#define RTE_DMA_MEM_TO_MEM	(1ull << 0)
> +/**< DMA transfer direction - from memory to memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_MEM_TO_DEV	(1ull << 1)
> +/**< DMA transfer direction - slave mode & from memory to device.
> + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> + * request from ARM memory to x86 host memory.

For clarity, it would be good to specify in the scenario described which
memory is the "mem" and which is the "dev" (I assume SoC memory is "mem"
and x86 host memory is "dev"??)

> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_DEV_TO_MEM	(1ull << 2)
> +/**< DMA transfer direction - slave mode & from device to memory.
> + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> + * request from x86 host memory to ARM memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_DEV_TO_DEV	(1ull << 3)
> +/**< DMA transfer direction - slave mode & from device to device.
> + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> + * request from x86 host memory to another x86 host memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_TRANSFER_DIR_ALL	(RTE_DMA_MEM_TO_MEM | \
> +					 RTE_DMA_MEM_TO_DEV | \
> +					 RTE_DMA_DEV_TO_MEM | \
> +					 RTE_DMA_DEV_TO_DEV)
> +
> +/**
> + * enum rte_dma_slave_port_type - slave mode type defines
> + */
> +enum rte_dma_slave_port_type {
> +	/** The slave port is PCIE. */
> +	RTE_DMA_SLAVE_PORT_PCIE = 1,
> +};
> +

As previously mentioned, this needs to be updated to use other terms.
For some suggested alternatives see:
https://doc.dpdk.org/guides-21.05/contributing/coding_style.html#naming

> +/**
> + * A structure used to descript slave port parameters.
> + */
> +struct rte_dma_slave_port_parameters {
> +	enum rte_dma_slave_port_type port_type;
> +	union {
> +		/** For PCIE port */
> +		struct {
> +			/** The physical function number which to use */
> +			uint64_t pf_number : 6;
> +			/** Virtual function enable bit */
> +			uint64_t vf_enable : 1;
> +			/** The virtual function number which to use */
> +			uint64_t vf_number : 8;
> +			uint64_t pasid : 20;
> +			/** The attributes filed in TLP packet */
> +			uint64_t tlp_attr : 3;
> +		};
> +	};
> +};
> +
> +/**
> + * A structure used to configure a virtual DMA channel.
> + */
> +struct rte_dmadev_vchan_conf {
> +	uint8_t direction; /**< Set of supported transfer directions */
> +	/** Number of descriptor for the virtual DMA channel */
> +	uint16_t nb_desc;
> +	/** 1) Used to describes the dev parameter in the mem-to-dev/dev-to-mem
> +	 * transfer scenario.
> +	 * 2) Used to describes the src dev parameter in the dev-to-dev
> +	 * transfer scenario.
> +	 */
> +	struct rte_dma_slave_port_parameters port;
> +	/** Used to describes the dst dev parameters in the dev-to-dev
> +	 * transfer scenario.
> +	 */
> +	struct rte_dma_slave_port_parameters peer_port;
> +	uint64_t reserved[2]; /**< Reserved for future fields */
> +};

Let's drop the reserved fields and use ABI versioning if necessary in the
future.

> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate and set up a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param conf
> + *   The virtual DMA channel configuration structure encapsulated into
> + *   rte_dmadev_vchan_conf object.
> + *
> + * @return
> + *   - >=0: Allocate success, it is the virtual DMA channel id. This value must
> + *          be less than the field 'max_vchans' of struct rte_dmadev_conf
> +	    which configured by rte_dmadev_configure().

nit: whitespace error here.

> + *   - <0: Error code returned by the driver virtual channel setup function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +		       const struct rte_dmadev_vchan_conf *conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Release a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel which return by vchan setup.
> + *
> + * @return
> + *   - =0: Successfully release the virtual DMA channel.
> + *   - <0: Error code returned by the driver virtual channel release function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
> +
> +/**
> + * rte_dmadev_stats - running statistics.
> + */
> +struct rte_dmadev_stats {
> +	/** Count of operations which were successfully enqueued */
> +	uint64_t enqueued_count;
> +	/** Count of operations which were submitted to hardware */
> +	uint64_t submitted_count;
> +	/** Count of operations which failed to complete */
> +	uint64_t completed_fail_count;
> +	/** Count of operations which successfully complete */
> +	uint64_t completed_count;
> +	uint64_t reserved[4]; /**< Reserved for future fields */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve basic statistics of a or all virtual DMA channel(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel, -1 means all channels.
> + * @param[out] stats
> + *   The basic statistics structure encapsulated into rte_dmadev_stats
> + *   object.
> + *
> + * @return
> + *   - =0: Successfully retrieve stats.
> + *   - <0: Failure to retrieve stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, int vchan,

vchan as uint16_t rather than int, I think. This would apply to all
dataplane functions. There is no need for a signed vchan value.

> +		     struct rte_dmadev_stats *stats);
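For illustration, a short sketch of how an application might poll these
counters, using -1 to aggregate over all channels as documented above
(PRIu64 comes from inttypes.h):

	#include <inttypes.h>

	struct rte_dmadev_stats stats;

	if (rte_dmadev_stats_get(dev_id, -1, &stats) == 0)
		printf("enq=%" PRIu64 " done=%" PRIu64 " failed=%" PRIu64 "\n",
		       stats.enqueued_count, stats.completed_count,
		       stats.completed_fail_count);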
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset basic statistics of one or all virtual DMA channels.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel, -1 means all channels.
> + *
> + * @return
> + *   - =0: Successfully reset stats.
> + *   - <0: Failure to reset stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, int vchan);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Dump DMA device info.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param f
> + *   The file to write the output to.
> + *
> + * @return
> + *   0 on success. Non-zero otherwise.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger the dmadev self test.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - 0: Selftest successful.
> + *   - -ENOTSUP if the device doesn't support selftest
> + *   - other values < 0 on failure.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_selftest(uint16_t dev_id);

I don't think this needs to be in the public API, since it should only be
for the autotest app to use. Maybe move the prototype to the _pmd.h (since
we don't have a separate internal header), and then the autotest app can
pick it up from there.

> +
> +#include "rte_dmadev_core.h"
> +
> +/**
> + *  DMA flags to augment operation preparation.
> + *  Used as the 'flags' parameter of rte_dmadev_copy/copy_sg/fill/fill_sg.
> + */
> +#define RTE_DMA_FLAG_FENCE	(1ull << 0)
> +/**< DMA fence flag
> + * It means the operation with this flag must be processed only after all
> + * previous operations are completed.
> + *
> + * @see rte_dmadev_copy()
> + * @see rte_dmadev_copy_sg()
> + * @see rte_dmadev_fill()
> + * @see rte_dmadev_fill_sg()
> + */

As a general comment, I think all these multi-line comments should go
before the item they describe. Comments after should only be used in the
case where the comment fits on the rest of the line after a value.

We also should define the SUBMIT flag as suggested by Jerin, to allow apps
to automatically submit jobs after enqueue.

> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the virtual DMA channel.
> + *
> + * This queues up a copy operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   Flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> +		uint32_t length, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +		return -EINVAL;
> +	}
> +#endif
> +	return (*dev->copy)(dev, vchan, src, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter list copy operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param sg_len
> + *   The number of scatterlist elements.
> + * @param flags
> + *   Flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
> +		   uint32_t sg_len, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(sg, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +		return -EINVAL;
> +	}
> +#endif
> +	return (*dev->copy_sg)(dev, vchan, sg, sg_len, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the virtual DMA channel.
> + *
> + * This queues up a fill operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   Flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued fill job.
> + *   - <0: Error code returned by the driver fill function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +		rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +		return -EINVAL;
> +	}
> +#endif
> +	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list fill operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter list fill operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param sg_len
> + *   The number of scatterlist elements.
> + * @param flags
> + *   Flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued fill job.
> + *   - <0: Error code returned by the driver fill function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +		   const struct rte_dma_sg *sg, uint32_t sg_len,
> +		   uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(sg, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill_sg, -ENOTSUP);
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +		return -EINVAL;
> +	}
> +#endif
> +	return (*dev->fill_sg)(dev, vchan, pattern, sg, sg_len, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations.
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *
> + * @return
> + *   - =0: Successfully trigger hardware.
> + *   - <0: Failure to trigger hardware.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +		return -EINVAL;
> +	}
> +#endif
> +	return (*dev->submit)(dev, vchan);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] has_error
> + *   Indicates if there was a transfer error.
> + *   If not required, NULL can be passed in.
> + *
> + * @return
> + *   The number of operations that successfully completed.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> +		     uint16_t *last_idx, bool *has_error)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	uint16_t idx;
> +	bool err;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, -ENOTSUP);
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +		return -EINVAL;
> +	}
> +	if (nb_cpls == 0) {
> +		RTE_DMADEV_LOG(ERR, "Invalid nb_cpls\n");
> +		return -EINVAL;
> +	}
> +#endif
> +
> +	/* Ensure the pointer values are non-null to simplify drivers.
> +	 * In most cases these should be compile time evaluated, since this is
> +	 * an inline function.
> +	 * - If NULL is explicitly passed as parameter, then compiler knows the
> +	 *   value is NULL
> +	 * - If address of local variable is passed as parameter, then compiler
> +	 *   can know it's non-NULL.
> +	 */
> +	if (last_idx == NULL)
> +		last_idx = &idx;
> +	if (has_error == NULL)
> +		has_error = &err;
> +
> +	*has_error = false;
> +	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> +}
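Putting the dataplane calls together, a minimal enqueue/submit/poll loop;
burst, srcs, dsts and lens are application-defined here:

	uint16_t last_idx, done = 0;
	bool has_error = false;
	int i, enq = 0;

	for (i = 0; i < burst; i++) {
		if (rte_dmadev_copy(dev_id, vchan, srcs[i], dsts[i],
				    lens[i], 0) < 0)
			break;
		enq++;
	}
	rte_dmadev_submit(dev_id, vchan);	/* one doorbell for the burst */

	while (done < enq && !has_error)
		done += rte_dmadev_completed(dev_id, vchan, enq - done,
					     &last_idx, &has_error);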
> +
> +/**
> + * DMA transfer status code defines
> + */
> +enum rte_dma_status_code {
> +	/** The operation completed successfully */
> +	RTE_DMA_STATUS_SUCCESSFUL = 0,
> +	/** The operation failed to complete due to an active drop.
> +	 * This is mainly used when processing dev_stop, allowing outstanding
> +	 * requests to be completed as much as possible.
> +	 */
> +	RTE_DMA_STATUS_ACTIVE_DROP,
> +	/** The operation failed to complete due to an invalid source address */
> +	RTE_DMA_STATUS_INVALID_SRC_ADDR,
> +	/** The operation failed to complete due to an invalid destination address */
> +	RTE_DMA_STATUS_INVALID_DST_ADDR,
> +	/** The operation failed to complete due to an invalid length */
> +	RTE_DMA_STATUS_INVALID_LENGTH,
> +	/** The operation failed to complete due to an invalid opcode.
> +	 * The DMA descriptor may have multiple formats, which are
> +	 * distinguished by the opcode field.
> +	 */
> +	RTE_DMA_STATUS_INVALID_OPCODE,
> +	/** The operation failed to complete due to a bus error */
> +	RTE_DMA_STATUS_BUS_ERROR,
> +	/** The operation failed to complete due to data poisoning */
> +	RTE_DMA_STATUS_DATA_POISION,
> +	/** The operation failed to complete due to a descriptor read error */
> +	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
> +	/** The operation failed to complete due to a device link error.
> +	 * Indicates a link error in the mem-to-dev/dev-to-mem/
> +	 * dev-to-dev transfer scenario.
> +	 */
> +	RTE_DMA_STATUS_DEV_LINK_ERROR,
> +	/** Driver specific status code offset
> +	 * Start status code for the driver to define its own error code.
> +	 */
> +	RTE_DMA_STATUS_DRV_SPECIFIC_OFFSET = 0x10000,
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that failed to complete.
> + * NOTE: This API should be called after rte_dmadev_completed() has
> + * reported has_error as set.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_status
> + *   Indicates the size of status array.
> + * @param[out] status
> + *   The error code of operations that failed to complete.
> + *   Some standard error codes are described in 'enum rte_dma_status_code'
> + *   @see rte_dma_status_code
> + * @param[out] last_idx
> + *   The last failed completed operation's index.
> + *
> + * @return
> + *   The number of operations that failed to complete.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
> +			   const uint16_t nb_status, uint32_t *status,
> +			   uint16_t *last_idx)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(status, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(last_idx, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_fails, -ENOTSUP);
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +		return -EINVAL;
> +	}
> +	if (nb_status == 0) {
> +		RTE_DMADEV_LOG(ERR, "Invalid nb_status\n");
> +		return -EINVAL;
> +	}
> +#endif
> +	return (*dev->completed_fails)(dev, vchan, nb_status, status, last_idx);
> +}
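A sketch of the error path this enables, continuing the loop style above;
MAX_BURST and handle_failed_job() are application-defined placeholders:

	uint16_t n, last_idx, fail_idx, nb_fail;
	bool has_error = false;
	int i;

	n = rte_dmadev_completed(dev_id, vchan, MAX_BURST, &last_idx,
				 &has_error);
	/* ... reap the n successful jobs ... */

	if (has_error) {
		uint32_t status[MAX_BURST];

		nb_fail = rte_dmadev_completed_fails(dev_id, vchan, MAX_BURST,
						     status, &fail_idx);
		for (i = 0; i < nb_fail; i++) {
			/* each entry holds an rte_dma_status_code value */
			handle_failed_job(status[i]);
		}
	}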
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_H_ */
> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> new file mode 100644
> index 0000000..410faf0
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -0,0 +1,159 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#ifndef _RTE_DMADEV_CORE_H_
> +#define _RTE_DMADEV_CORE_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device internal header.
> + *
> + * This header contains internal data types, that are used by the DMA devices
> + * in order to expose their ops to the class.
> + *
> + * Applications should not use these APIs directly.
> + *
> + */
> +
> +struct rte_dmadev;
> +
> +/** @internal Used to get device information of a device. */
> +typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
> +				 struct rte_dmadev_info *dev_info);

First parameter can be "const"

> +/** @internal Used to configure a device. */
> +typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
> +				  const struct rte_dmadev_conf *dev_conf);
> +
> +/** @internal Used to start a configured device. */
> +typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to stop a configured device. */
> +typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to close a configured device. */
> +typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to reset a configured device. */
> +typedef int (*dmadev_reset_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to allocate and set up a virtual DMA channel. */
> +typedef int (*dmadev_vchan_setup_t)(struct rte_dmadev *dev,
> +				    const struct rte_dmadev_vchan_conf *conf);
> +
> +/** @internal Used to release a virtual DMA channel. */
> +typedef int (*dmadev_vchan_release_t)(struct rte_dmadev *dev, uint16_t vchan);
> +
> +/** @internal Used to retrieve basic statistics. */
> +typedef int (*dmadev_stats_get_t)(struct rte_dmadev *dev, int vchan,
> +				  struct rte_dmadev_stats *stats);

First parameter can be "const"

> +
> +/** @internal Used to reset basic statistics. */
> +typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, int vchan);
> +
> +/** @internal Used to dump internal information. */
> +typedef int (*dmadev_dump_t)(struct rte_dmadev *dev, FILE *f);
> +

First param "const"

> +/** @internal Used to start dmadev selftest. */
> +typedef int (*dmadev_selftest_t)(uint16_t dev_id);
> +

This looks like an outlier in taking a dev_id. It should take a dmadev parameter.
Most drivers should not need to implement this anyway, as the main unit
tests should be in "test_dmadev.c" in the autotest app.

> +/** @internal Used to enqueue a copy operation. */
> +typedef int (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
> +			     rte_iova_t src, rte_iova_t dst,
> +			     uint32_t length, uint64_t flags);
> +
> +/** @internal Used to enqueue a scatter list copy operation. */
> +typedef int (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
> +				const struct rte_dma_sg *sg,
> +				uint32_t sg_len, uint64_t flags);
> +
> +/** @internal Used to enqueue a fill operation. */
> +typedef int (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
> +			     uint64_t pattern, rte_iova_t dst,
> +			     uint32_t length, uint64_t flags);
> +
> +/** @internal Used to enqueue a scatter list fill operation. */
> +typedef int (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
> +			uint64_t pattern, const struct rte_dma_sg *sg,
> +			uint32_t sg_len, uint64_t flags);
> +
> +/** @internal Used to trigger hardware to begin working. */
> +typedef int (*dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
> +
> +/** @internal Used to return number of successful completed operations. */
> +typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vchan,
> +				       const uint16_t nb_cpls,
> +				       uint16_t *last_idx, bool *has_error);
> +
> +/** @internal Used to return number of failed completed operations. */
> +typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
> +			uint16_t vchan, const uint16_t nb_status,
> +			uint32_t *status, uint16_t *last_idx);
> +
> +/**
> + * DMA device operations function pointer table
> + */
> +struct rte_dmadev_ops {
> +	dmadev_info_get_t dev_info_get;
> +	dmadev_configure_t dev_configure;
> +	dmadev_start_t dev_start;
> +	dmadev_stop_t dev_stop;
> +	dmadev_close_t dev_close;
> +	dmadev_reset_t dev_reset;
> +	dmadev_vchan_setup_t vchan_setup;
> +	dmadev_vchan_release_t vchan_release;
> +	dmadev_stats_get_t stats_get;
> +	dmadev_stats_reset_t stats_reset;
> +	dmadev_dump_t dev_dump;
> +	dmadev_selftest_t dev_selftest;
> +};
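From the driver side, a sketch of how a PMD might fill in this table at
probe time; the skel_* callbacks are hypothetical, and
rte_dmadev_pmd_allocate() is the allocator declared below in
rte_dmadev_pmd.h:

	static const struct rte_dmadev_ops skel_ops = {
		.dev_info_get  = skel_info_get,
		.dev_configure = skel_configure,
		.dev_start     = skel_start,
		.dev_stop      = skel_stop,
		.dev_close     = skel_close,
		.vchan_setup   = skel_vchan_setup,
		.stats_get     = skel_stats_get,
		.stats_reset   = skel_stats_reset,
	};

	struct rte_dmadev *dev = rte_dmadev_pmd_allocate("skel_dma0");
	if (dev == NULL)
		return -ENOMEM;
	dev->dev_ops = &skel_ops;
	/* dataplane entry points are set directly on the device struct */
	dev->copy = skel_copy;
	dev->submit = skel_submit;
	dev->completed = skel_completed;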
> +
> +/**
> + * @internal
> + * The data part, with no function pointers, associated with each DMA device.
> + *
> + * This structure is safe to place in shared memory to be common among different
> + * processes in a multi-process configuration.
> + */
> +struct rte_dmadev_data {
> +	uint16_t dev_id; /**< Device [external] identifier. */
> +	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
> +	void *dev_private; /**< PMD-specific private data. */
> +	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
> +	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
> +	uint64_t reserved[4]; /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +

While I generally don't like having reserved space, this is one place where
it makes sense, so +1 for it here.

> +/**
> + * @internal
> + * The generic data structure associated with each DMA device.
> + *
> + * The dataplane APIs are located at the beginning of the structure, along
> + * with the pointer to where all the data elements for the particular device
> + * are stored in shared memory. This split scheme allows the function pointer
> + * and driver data to be per-process, while the actual configuration data for
> + * the device is shared.
> + */
> +struct rte_dmadev {
> +	dmadev_copy_t copy;
> +	dmadev_copy_sg_t copy_sg;
> +	dmadev_fill_t fill;
> +	dmadev_fill_sg_t fill_sg;
> +	dmadev_submit_t submit;
> +	dmadev_completed_t completed;
> +	dmadev_completed_fails_t completed_fails;
> +	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
> +	/** Flag indicating the device is attached: ATTACHED(1)/DETACHED(0). */
> +	uint8_t attached : 1;

Since it's in the midst of a series of pointers, this 1-bit flag is
actually using 8 bytes of space. Is it needed? Can we use dev_ops == NULL
or data == NULL instead to indicate whether this is a valid entry?

> +	/** Device info which supplied during device initialization. */
> +	struct rte_device *device;
> +	struct rte_dmadev_data *data; /**< Pointer to device data. */

If we are to try and minimise cacheline access, we should put this data
pointer - or even better a copy of data->private pointer - at the top of
the structure on the same cacheline as datapath operations. For dataplane,
I can't see any elements of data, except the private pointer, being
accessed, so we would probably get the most benefit from having a copy put
there on init of the dmadev struct.
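
A sketch of the reordering being suggested; with eight 8-byte fields the
hot part fits exactly into one 64-byte cacheline on 64-bit systems:

	struct rte_dmadev {
		/* hot: dataplane pointers plus a cached copy of
		 * data->dev_private, all on the first cacheline
		 */
		void *dev_private;
		dmadev_copy_t copy;
		dmadev_copy_sg_t copy_sg;
		dmadev_fill_t fill;
		dmadev_fill_sg_t fill_sg;
		dmadev_submit_t submit;
		dmadev_completed_t completed;
		dmadev_completed_fails_t completed_fails;
		/* cold: control path only */
		const struct rte_dmadev_ops *dev_ops;
		struct rte_device *device;
		struct rte_dmadev_data *data;
	} __rte_cache_aligned;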

> +	uint64_t reserved[4]; /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +extern struct rte_dmadev rte_dmadevices[];
> +
> +#endif /* _RTE_DMADEV_CORE_H_ */
> diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
> new file mode 100644
> index 0000000..45141f9
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_pmd.h
> @@ -0,0 +1,72 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_PMD_H_
> +#define _RTE_DMADEV_PMD_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device PMD APIs
> + *
> + * Driver facing APIs for a DMA device. These are not to be called directly by
> + * any application.
> + */
> +
> +#include "rte_dmadev.h"
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * @internal
> + * Allocates a new dmadev slot for a DMA device and returns the pointer
> + * to that slot for the driver to use.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   A pointer to the DMA device slot in case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name);
> +
> +/**
> + * @internal
> + * Release the specified dmadev.
> + *
> + * @param dev
> + *   Device to be released.
> + *
> + * @return
> + *   - 0 on success, negative on error
> + */
> +__rte_internal
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev);
> +
> +/**
> + * @internal
> + * Return the DMA device based on the device name.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   A pointer to the DMA device slot in case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dmadev *
> +rte_dmadev_get_device_by_name(const char *name);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_PMD_H_ */
> diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
> new file mode 100644
> index 0000000..0f099e7
> --- /dev/null
> +++ b/lib/dmadev/version.map
> @@ -0,0 +1,40 @@
> +EXPERIMENTAL {
> +	global:
> +
> +	rte_dmadev_count;
> +	rte_dmadev_info_get;
> +	rte_dmadev_configure;
> +	rte_dmadev_start;
> +	rte_dmadev_stop;
> +	rte_dmadev_close;
> +	rte_dmadev_reset;
> +	rte_dmadev_vchan_setup;
> +	rte_dmadev_vchan_release;
> +	rte_dmadev_stats_get;
> +	rte_dmadev_stats_reset;
> +	rte_dmadev_dump;
> +	rte_dmadev_selftest;
> +	rte_dmadev_copy;
> +	rte_dmadev_copy_sg;
> +	rte_dmadev_fill;
> +	rte_dmadev_fill_sg;
> +	rte_dmadev_submit;
> +	rte_dmadev_completed;
> +	rte_dmadev_completed_fails;
> +
> +	local: *;
> +};

The elements in the version.map file blocks should be sorted alphabetically.

> +
> +INTERNAL {
> +        global:
> +
> +	rte_dmadevices;
> +	rte_dmadev_pmd_allocate;
> +	rte_dmadev_pmd_release;
> +	rte_dmadev_get_device_by_name;
> +
> +	local:
> +
> +	rte_dmadev_is_valid_dev;
> +};
> +
> diff --git a/lib/meson.build b/lib/meson.build
> index 1673ca4..68d239f 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -60,6 +60,7 @@ libraries = [
>          'bpf',
>          'graph',
>          'node',
> +        'dmadev',
>  ]
>  
>  if is_windows
> -- 
> 2.8.1
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-12 13:32     ` Bruce Richardson
@ 2021-07-12 16:34       ` Jerin Jacob
  2021-07-12 17:00         ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-12 16:34 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma

On Mon, Jul 12, 2021 at 7:02 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Mon, Jul 12, 2021 at 03:29:27PM +0530, Jerin Jacob wrote:
> > On Sun, Jul 11, 2021 at 2:59 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > >
> > > This patch introduce 'dmadevice' which is a generic type of DMA
> > > device.
> > >
> > > The APIs of dmadev library exposes some generic operations which can
> > > enable configuration and I/O with the DMA devices.
> > >
> > > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > > ---
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > + *
> > > + * Enqueue a fill operation onto the virtual DMA channel.
> > > + *
> > > + * This queues up a fill operation to be performed by hardware, but does not
> > > + * trigger hardware to begin that operation.
> > > + *
> > > + * @param dev_id
> > > + *   The identifier of the device.
> > > + * @param vchan
> > > + *   The identifier of virtual DMA channel.
> > > + * @param pattern
> > > + *   The pattern to populate the destination buffer with.
> > > + * @param dst
> > > + *   The address of the destination buffer.
> > > + * @param length
> > > + *   The length of the destination buffer.
> > > + * @param flags
> > > + *   An flags for this operation.
> > > + *
> > > + * @return
> > > + *   - 0..UINT16_MAX: index of enqueued copy job.
> >
> > fill job
> >
> > > + *   - <0: Error code returned by the driver copy function.
> > > + */
> > > +__rte_experimental
> > > +static inline int
> > > +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> > > +               rte_iova_t dst, uint32_t length, uint64_t flags)
> > > +{
> > > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > > +#ifdef RTE_DMADEV_DEBUG
> > > +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > > +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> > > +       if (vchan >= dev->data->dev_conf.max_vchans) {
> > > +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > > +               return -EINVAL;
> > > +       }
> > > +#endif
> > > +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> > > +}
> > > +
> >
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > + *
> > > + * Returns the number of operations that have been successfully completed.
> > > + *
> > > + * @param dev_id
> > > + *   The identifier of the device.
> > > + * @param vchan
> > > + *   The identifier of virtual DMA channel.
> > > + * @param nb_cpls
> > > + *   The maximum number of completed operations that can be processed.
> > > + * @param[out] last_idx
> > > + *   The last completed operation's index.
> > > + *   If not required, NULL can be passed in.
> >
> > This means the driver will be tracking the last index.
> >
>
> Driver will be doing this anyway, no, since it needs to ensure we don't

Yes.

> wrap around?


>
> > Does that mean the application needs to call this API periodically to
> > consume the completion slots?
> > I.e., up to 64K (UINT16_MAX) outstanding jobs are possible. If the
> > application fails to call this with
> > >64K jobs outstanding, then the subsequent enqueue will fail.
>
> Well, given that there will be a regular enqueue ring which will probably
> be <= 64k in size, the completion call will need to be called frequently
> anyway. I don't think we need to document this restriction as it's fairly
> understood that you can't go beyond the size of the ring without cleanup.


See below.

>
> >
> > If so, we need to document this.
> >
> > One of the concerns of keeping UINT16_MAX as the limit is the
> > completion memory will always not in cache.
> > On the other hand, if we make this size programmable. it may introduce
> > complexity in the application.
> >
> > Thoughts?
>
> The reason for using powers-of-2 sizes, e.g. 0 .. UINT16_MAX, is that the
> ring can be any other power-of-2 size and we can index it just by masking.
> In the sample app for dmadev, I expect the ring size used to be set the
> same as the dmadev enqueue ring size, for simplicity.

No question on not using power of 2. Aligned on that.

At least in our HW, the size of the ring is rte_dmadev_vchan_conf::nb_desc.
But completion happens in a _different_ memory space. Currently, we are
allocating UINT16_MAX entries to hold that. That's where the cache-miss
aspect of completions comes from.

In your case, does completion happen in the same ring memory (it looks like
one bit in the job descriptor represents whether the job completed)?
And when the application calls rte_dmadev_completed(), you are converting
the UINT16_MAX-based index to
rte_dmadev_vchan_conf::nb_desc. Right?


>
> In fact, I was thinking that in later versions we may be able to include
> some macros to help make this whole process easier, of converting indexes
> to arbitrary data structures. [The reason for using macros is so that the
> actual rings we are indexing can be of user-defined type, rather than just
> a ring of pointers].
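To illustrate the masking idea described above, a sketch of an
application-defined metadata ring keyed by the returned ring_idx; RING_SZ
and struct job_meta are app-defined:

	#define RING_SZ   4096			/* any power of 2 */
	#define RING_MASK (RING_SZ - 1)
	static struct job_meta meta[RING_SZ];

	int idx = rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
	if (idx >= 0)
		meta[idx & RING_MASK] = cur_job;	/* stash per-job state */

	/* after rte_dmadev_completed() returns n and last_idx, jobs
	 * (last_idx - n + 1) .. last_idx are done; uint16_t arithmetic
	 * handles the wrap at UINT16_MAX for free.
	 */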
>
> /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-12 16:34       ` Jerin Jacob
@ 2021-07-12 17:00         ` Bruce Richardson
  2021-07-13  8:59           ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-12 17:00 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma

On Mon, Jul 12, 2021 at 10:04:07PM +0530, Jerin Jacob wrote:
> On Mon, Jul 12, 2021 at 7:02 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > On Mon, Jul 12, 2021 at 03:29:27PM +0530, Jerin Jacob wrote:
> > > On Sun, Jul 11, 2021 at 2:59 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > >
> > > > This patch introduce 'dmadevice' which is a generic type of DMA
> > > > device.
> > > >
> > > > The APIs of dmadev library exposes some generic operations which can
> > > > enable configuration and I/O with the DMA devices.
> > > >
> > > > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > > > ---
> > > > +/**
> > > > + * @warning
> > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > + *
> > > > + * Enqueue a fill operation onto the virtual DMA channel.
> > > > + *
> > > > + * This queues up a fill operation to be performed by hardware, but does not
> > > > + * trigger hardware to begin that operation.
> > > > + *
> > > > + * @param dev_id
> > > > + *   The identifier of the device.
> > > > + * @param vchan
> > > > + *   The identifier of virtual DMA channel.
> > > > + * @param pattern
> > > > + *   The pattern to populate the destination buffer with.
> > > > + * @param dst
> > > > + *   The address of the destination buffer.
> > > > + * @param length
> > > > + *   The length of the destination buffer.
> > > > + * @param flags
> > > > + *   An flags for this operation.
> > > > + *
> > > > + * @return
> > > > + *   - 0..UINT16_MAX: index of enqueued copy job.
> > >
> > > fill job
> > >
> > > > + *   - <0: Error code returned by the driver copy function.
> > > > + */
> > > > +__rte_experimental
> > > > +static inline int
> > > > +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> > > > +               rte_iova_t dst, uint32_t length, uint64_t flags)
> > > > +{
> > > > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > > > +#ifdef RTE_DMADEV_DEBUG
> > > > +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > > > +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> > > > +       if (vchan >= dev->data->dev_conf.max_vchans) {
> > > > +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > > > +               return -EINVAL;
> > > > +       }
> > > > +#endif
> > > > +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> > > > +}
> > > > +
> > >
> > > > +/**
> > > > + * @warning
> > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > + *
> > > > + * Returns the number of operations that have been successfully completed.
> > > > + *
> > > > + * @param dev_id
> > > > + *   The identifier of the device.
> > > > + * @param vchan
> > > > + *   The identifier of virtual DMA channel.
> > > > + * @param nb_cpls
> > > > + *   The maximum number of completed operations that can be processed.
> > > > + * @param[out] last_idx
> > > > + *   The last completed operation's index.
> > > > + *   If not required, NULL can be passed in.
> > >
> > > This means the driver will be tracking the last index.
> > >
> >
> > Driver will be doing this anyway, no, since it needs to ensure we don't
> 
> Yes.
> 
> > wrap around?
> 
> 
> >
> > > Does that mean the application needs to call this API periodically to
> > > consume the completion slots?
> > > I.e., up to 64K (UINT16_MAX) outstanding jobs are possible. If the
> > > application fails to call this with
> > > >64K jobs outstanding, then the subsequent enqueue will fail.
> >
> > Well, given that there will be a regular enqueue ring which will probably
> > be <= 64k in size, the completion call will need to be called frequently
> > anyway. I don't think we need to document this restriction as it's fairly
> > understood that you can't go beyond the size of the ring without cleanup.
> 
> 
> See below.
> 
> >
> > >
> > > If so, we need to document this.
> > >
> > > One of the concerns of keeping UINT16_MAX as the limit is the
> > > completion memory will always not in cache.
> > > On the other hand, if we make this size programmable. it may introduce
> > > complexity in the application.
> > >
> > > Thoughts?
> >
> > The reason for using powers-of-2 sizes, e.g. 0 .. UINT16_MAX, is that the
> > ring can be any other power-of-2 size and we can index it just by masking.
> > In the sample app for dmadev, I expect the ring size used to be set the
> > same as the dmadev enqueue ring size, for simplicity.
> 
> No question on not using power of 2. Aligned on that.
> 
> > At least in our HW, the size of the ring is rte_dmadev_vchan_conf::nb_desc.
> > But completion happens in a _different_ memory space. Currently, we are
> > allocating UINT16_MAX entries to hold that. That's where the cache-miss
> > aspect of completions comes from.

Depending on HW, our completions can be written back to a separate memory
area - a completion ring, if you will - but I've generally found it works
as well to reuse the enqueue ring for that purpose. However, with a
separate memory area for completions, why do you need to allocate 64K
entries for the completions? Would nb_desc entries not be enough? Is that
to allow the user to have more than nb_desc jobs outstanding before calling
"get_completions" API?

> 
> > In your case, does completion happen in the same ring memory (it looks like
> > one bit in the job descriptor represents whether the job completed)?
> > And when the application calls rte_dmadev_completed(), you are converting
> > the UINT16_MAX-based index to
> > rte_dmadev_vchan_conf::nb_desc. Right?

Yes, we are masking to do that. Actually, for simplicity and perf we should
only allow power-of-2 ring sizes. Having to use modulus instead of masking
could be a problem. [Alternatively, I suppose we can allow drivers to round
up the ring sizes to the next power of 2, but I prefer just documenting it
as a limitation].
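The round-up alternative could lean on the existing rte_align32pow2()
helper from rte_common.h; a sketch of what a driver's vchan_setup might do
(variable names are illustrative):

	uint32_t nb_desc = rte_align32pow2(conf->nb_desc);
	uint32_t mask = nb_desc - 1;

	/* the free-running uint16_t job index maps to a slot by masking */
	uint16_t slot = job_idx & mask;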

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-12 17:00         ` Bruce Richardson
@ 2021-07-13  8:59           ` Jerin Jacob
  0 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-13  8:59 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma

On Mon, Jul 12, 2021 at 10:30 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Mon, Jul 12, 2021 at 10:04:07PM +0530, Jerin Jacob wrote:
> > On Mon, Jul 12, 2021 at 7:02 PM Bruce Richardson
> > <bruce.richardson@intel.com> wrote:
> > >
> > > On Mon, Jul 12, 2021 at 03:29:27PM +0530, Jerin Jacob wrote:
> > > > On Sun, Jul 11, 2021 at 2:59 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > > >
> > > > > This patch introduce 'dmadevice' which is a generic type of DMA
> > > > > device.
> > > > >
> > > > > The APIs of dmadev library exposes some generic operations which can
> > > > > enable configuration and I/O with the DMA devices.
> > > > >
> > > > > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > > > > ---
> > > > > +/**
> > > > > + * @warning
> > > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > > + *
> > > > > + * Enqueue a fill operation onto the virtual DMA channel.
> > > > > + *
> > > > > + * This queues up a fill operation to be performed by hardware, but does not
> > > > > + * trigger hardware to begin that operation.
> > > > > + *
> > > > > + * @param dev_id
> > > > > + *   The identifier of the device.
> > > > > + * @param vchan
> > > > > + *   The identifier of virtual DMA channel.
> > > > > + * @param pattern
> > > > > + *   The pattern to populate the destination buffer with.
> > > > > + * @param dst
> > > > > + *   The address of the destination buffer.
> > > > > + * @param length
> > > > > + *   The length of the destination buffer.
> > > > > + * @param flags
> > > > > + *   An flags for this operation.
> > > > > + *
> > > > > + * @return
> > > > > + *   - 0..UINT16_MAX: index of enqueued copy job.
> > > >
> > > > fill job
> > > >
> > > > > + *   - <0: Error code returned by the driver copy function.
> > > > > + */
> > > > > +__rte_experimental
> > > > > +static inline int
> > > > > +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> > > > > +               rte_iova_t dst, uint32_t length, uint64_t flags)
> > > > > +{
> > > > > +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > > > > +#ifdef RTE_DMADEV_DEBUG
> > > > > +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > > > > +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> > > > > +       if (vchan >= dev->data->dev_conf.max_vchans) {
> > > > > +               RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > > > > +               return -EINVAL;
> > > > > +       }
> > > > > +#endif
> > > > > +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> > > > > +}
> > > > > +
> > > >
> > > > > +/**
> > > > > + * @warning
> > > > > + * @b EXPERIMENTAL: this API may change without prior notice.
> > > > > + *
> > > > > + * Returns the number of operations that have been successfully completed.
> > > > > + *
> > > > > + * @param dev_id
> > > > > + *   The identifier of the device.
> > > > > + * @param vchan
> > > > > + *   The identifier of virtual DMA channel.
> > > > > + * @param nb_cpls
> > > > > + *   The maximum number of completed operations that can be processed.
> > > > > + * @param[out] last_idx
> > > > > + *   The last completed operation's index.
> > > > > + *   If not required, NULL can be passed in.
> > > >
> > > > This means the driver will be tracking the last index.
> > > >
> > >
> > > Driver will be doing this anyway, no, since it needs to ensure we don't
> >
> > Yes.
> >
> > > wrap around?
> >
> >
> > >
> > > > Does that mean the application needs to call this API periodically to
> > > > consume the completion slots?
> > > > I.e., up to 64K (UINT16_MAX) outstanding jobs are possible. If the
> > > > application fails to call this with
> > > > >64K jobs outstanding, then the subsequent enqueue will fail.
> > >
> > > Well, given that there will be a regular enqueue ring which will probably
> > > be <= 64k in size, the completion call will need to be called frequently
> > > anyway. I don't think we need to document this restriction as it's fairly
> > > understood that you can't go beyond the size of the ring without cleanup.
> >
> >
> > See below.
> >
> > >
> > > >
> > > > If so, we need to document this.
> > > >
> > > > One of the concerns of keeping UINT16_MAX as the limit is the
> > > > completion memory will always not in cache.
> > > > On the other hand, if we make this size programmable. it may introduce
> > > > complexity in the application.
> > > >
> > > > Thoughts?
> > >
> > > The reason for using powers-of-2 sizes, e.g. 0 .. UINT16_MAX, is that the
> > > ring can be any other power-of-2 size and we can index it just by masking.
> > > In the sample app for dmadev, I expect the ring size used to be set the
> > > same as the dmadev enqueue ring size, for simplicity.
> >
> > No question on not using power of 2. Aligned on that.
> >
> > > At least in our HW, the size of the ring is rte_dmadev_vchan_conf::nb_desc.
> > > But completion happens in a _different_ memory space. Currently, we are
> > > allocating UINT16_MAX entries to hold that. That's where the cache-miss
> > > aspect of completions comes from.
>
> Depending on HW, our completions can be written back to a separate memory
> area - a completion ring, if you will - but I've generally found it works
> as well to reuse the enqueue ring for that purpose. However, with a
> separate memory area for completions, why do you need to allocate 64K
> entries for the completions? Would nb_desc entries not be enough? Is that
> to allow the user to have more than nb_desc jobs outstanding before calling
> "get_completions" API?

Yes. That's what I thought. That's where my question about the max number of
outstanding completions came from. I thought it could be up to 64K. Agreed to
keep it implementation-specific, with no need to highlight this in the
documentation.


>
> >
> > In your case, does completion happen in the same ring memory (it looks like
> > one bit in the job descriptor represents whether the job completed)?
> > And when the application calls rte_dmadev_completed(), you are converting
> > the UINT16_MAX-based index to
> > rte_dmadev_vchan_conf::nb_desc. Right?
>
> Yes, we are masking to do that. Actually, for simplicity and perf we should
> only allow power-of-2 ring sizes. Having to use modulus instead of masking
> could be a problem. [Alternatively, I suppose we can allow drivers to round
> up the ring sizes to the next power of 2, but I prefer just documenting it
> as a limitation].

OK.

>
> /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-12 15:50   ` Bruce Richardson
@ 2021-07-13  9:07     ` Jerin Jacob
  0 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-13  9:07 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	dpdk-dev, Morten Brørup, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin, liangma

On Mon, Jul 12, 2021 at 9:21 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Sun, Jul 11, 2021 at 05:25:56PM +0800, Chengwen Feng wrote:
> > This patch introduce 'dmadevice' which is a generic type of DMA
> > device.
> >
> > The APIs of dmadev library exposes some generic operations which can
> > enable configuration and I/O with the DMA devices.
> >
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>
> Hi again,
>
> some further review comments inline.
>
> /Bruce
>
> > ---
> >  MAINTAINERS                  |    4 +
> >  config/rte_config.h          |    3 +
> >  lib/dmadev/meson.build       |    6 +
> >  lib/dmadev/rte_dmadev.c      |  560 +++++++++++++++++++++++
> >  lib/dmadev/rte_dmadev.h      | 1030 ++++++++++++++++++++++++++++++++++++++++++
> >  lib/dmadev/rte_dmadev_core.h |  159 +++++++
> >  lib/dmadev/rte_dmadev_pmd.h  |   72 +++
> >  lib/dmadev/version.map       |   40 ++
> >  lib/meson.build              |    1 +
>
> <snip>
>
> > diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> > new file mode 100644
> > index 0000000..8779512
> > --- /dev/null
> > +++ b/lib/dmadev/rte_dmadev.h
> > @@ -0,0 +1,1030 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2021 HiSilicon Limited.
> > + * Copyright(c) 2021 Intel Corporation.
> > + * Copyright(c) 2021 Marvell International Ltd.
> > + */
> > +
> > +#ifndef _RTE_DMADEV_H_
> > +#define _RTE_DMADEV_H_
> > +
> > +/**
> > + * @file rte_dmadev.h
> > + *
> > + * RTE DMA (Direct Memory Access) device APIs.
> > + *
> > + * The DMA framework is built on the following model:
> > + *
> > + *     ---------------   ---------------       ---------------
> > + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> > + *     | channel     |   | channel     |       | channel     |
> > + *     ---------------   ---------------       ---------------
> > + *            |                |                      |
> > + *            ------------------                      |
> > + *                     |                              |
> > + *               ------------                    ------------
> > + *               |  dmadev  |                    |  dmadev  |
> > + *               ------------                    ------------
> > + *                     |                              |
> > + *            ------------------               ------------------
> > + *            | HW-DMA-channel |               | HW-DMA-channel |
> > + *            ------------------               ------------------
> > + *                     |                              |
> > + *                     --------------------------------
> > + *                                     |
> > + *                           ---------------------
> > + *                           | HW-DMA-Controller |
> > + *                           ---------------------
> > + *
> > + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
> > + * each HW-DMA-channel should be represented by a dmadev.
> > + *
> > + * The dmadev could create multiple virtual DMA channels; each virtual DMA
> > + * channel represents a different transfer context. The DMA operation request
> > + * must be submitted to the virtual DMA channel.
> > + * E.G. Application could create virtual DMA channel 0 for mem-to-mem transfer
> > + *      scenario, and create virtual DMA channel 1 for mem-to-dev transfer
> > + *      scenario.
> > + *
> > + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
> > + * PCI/SoC device probing phase performed at EAL initialization time. And could
> > + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> > + * phase.
> > + *
> > + * We use 'uint16_t dev_id' as the device identifier of a dmadev, and
> > + * 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> > + *
> > + * The functions exported by the dmadev API to setup a device designated by its
> > + * device identifier must be invoked in the following order:
> > + *     - rte_dmadev_configure()
> > + *     - rte_dmadev_vchan_setup()
> > + *     - rte_dmadev_start()
> > + *
> > + * Then, the application can invoke dataplane APIs to process jobs.
> > + *
> > + * If the application wants to change the configuration (i.e. call
> > + * rte_dmadev_configure()), it must call rte_dmadev_stop() first to stop the
> > + * device and then do the reconfiguration before calling rte_dmadev_start()
> > + * again. The dataplane APIs should not be invoked when the device is stopped.
> > + *
> > + * Finally, an application can close a dmadev by invoking the
> > + * rte_dmadev_close() function.
> > + *
> > + * The dataplane APIs include two parts:
> > + *   a) The first part is the submission of operation requests:
> > + *        - rte_dmadev_copy()
> > + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> > + *        - rte_dmadev_fill()
> > + *        - rte_dmadev_fill_sg() - scatter-gather form of fill
> > + *        - rte_dmadev_perform() - issue doorbell to hardware
> > + *      These APIs could work with different virtual DMA channels which have
> > + *      different contexts.
> > + *      The first four APIs are used to submit the operation request to the
> > + *      virtual DMA channel, if the submission is successful, a uint16_t
> > + *      ring_idx is returned, otherwise a negative number is returned.
> > + *   b) The second part is to obtain the result of requests:
> > + *        - rte_dmadev_completed()
> > + *            - return the number of operation requests completed successfully.
> > + *        - rte_dmadev_completed_fails()
> > + *            - return the number of operation requests failed to complete.
>
> Please rename this to "completed_status" to allow the return of information
> other than just errors. As I suggested before, I think this should also be
> usable as a slower version of "completed" even in the case where there are
> no errors, in that it returns status information for each and every job
> rather than just returning as soon as it hits a failure.
>
> > + *
> > + * About the ring_idx which rte_dmadev_copy/copy_sg/fill/fill_sg() returned,
> > + * the rules are as follows:
> > + *   a) ring_idx for each virtual DMA channel is independent.
> > + *   b) For a virtual DMA channel, the ring_idx is monotonically incremented;
> > + *      when it reaches UINT16_MAX, it wraps back to zero.
>
> Based on other feedback, I suggest we put in the detail here that: "This
> index can be used by applications to track per-job metadata in an
> application-defined circular ring, where the ring is a power-of-2 size, and
> the indexes are masked appropriately."
>
> > + *   c) The initial ring_idx of a virtual DMA channel is zero, after the device
> > + *      is stopped or reset, the ring_idx needs to be reset to zero.
> > + *   Example:
> > + *      step-1: start one dmadev
> > + *      step-2: enqueue a copy operation, the ring_idx return is 0
> > + *      step-3: enqueue a copy operation again, the ring_idx return is 1
> > + *      ...
> > + *      step-101: stop the dmadev
> > + *      step-102: start the dmadev
> > + *      step-103: enqueue a copy operation, the cookie return is 0
> > + *      ...
> > + *      step-x+0: enqueue a fill operation, the ring_idx return is 65535
> > + *      step-x+1: enqueue a copy operation, the ring_idx return is 0
> > + *      ...
> > + *
> > + * By default, all the non-dataplane functions of the dmadev API exported by a
> > + * PMD are lock-free functions which assume to not be invoked in parallel on
> > + * different logical cores to work on the same target object.
> > + *
> > + * The dataplane functions of the dmadev API exported by a PMD can be MT-safe
> > + * only when supported by the driver; generally, the driver will report two
> > + * capabilities:
> > + *   a) Whether to support MT-safe for the submit/completion API of the same
> > + *      virtual DMA channel.
> > + *      E.G. one thread do submit operation, another thread do completion
> > + *           operation.
> > + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_VCHAN.
> > + *      If driver don't support it, it's up to the application to guarantee
> > + *      MT-safe.
> > + *   b) Whether to support MT-safe for different virtual DMA channels.
> > + *      E.G. one thread do operation on virtual DMA channel 0, another thread
> > + *           do operation on virtual DMA channel 1.
> > + *      If driver support it, then declare RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN.
> > + *      If driver don't support it, it's up to the application to guarantee
> > + *      MT-safe.
> > + *
> > + */
>
> Just to check - do we have hardware that currently supports these
> capabilities? For Intel HW, we will only support one virtual channel per
> device without any MT-safety guarantees, so won't be setting either of
> these flags. If any of these flags are unused in all planned drivers, we
> should drop them from the spec until they prove necessary. Ideally,
> everything in the dmadev definition should be testable, and features unused
> by anyone obviously will be untested.
>
> > +
> > +#include <rte_common.h>
> > +#include <rte_compat.h>
> > +#include <rte_errno.h>
> > +#include <rte_memory.h>
> > +
> > +#ifdef __cplusplus
> > +extern "C" {
> > +#endif
> > +
> > +#define RTE_DMADEV_NAME_MAX_LEN      RTE_DEV_NAME_MAX_LEN
> > +
> > +extern int rte_dmadev_logtype;
> > +
> > +#define RTE_DMADEV_LOG(level, ...) \
> > +     rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
> > +
> > +/* Macros to check for valid port */
> > +#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
> > +     if (!rte_dmadev_is_valid_dev(dev_id)) { \
> > +             RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> > +             return retval; \
> > +     } \
> > +} while (0)
> > +
> > +#define RTE_DMADEV_VALID_DEV_ID_OR_RET(dev_id) do { \
> > +     if (!rte_dmadev_is_valid_dev(dev_id)) { \
> > +             RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> > +             return; \
> > +     } \
> > +} while (0)
> > +
>
> Can we avoid using these in the inline functions in this file, and move
> them to the _pmd.h which is for internal PMD use only? It would mean we
> don't get logging from the key dataplane functions, but I would hope the
> return values would provide enough info.
>
> Alternatively, can we keep the logtype definition and first macro and move
> the other two to the _pmd.h file.
>
> > +/**
> > + * @internal
> > + * Validate if the DMA device index is a valid attached DMA device.
> > + *
> > + * @param dev_id
> > + *   DMA device index.
> > + *
> > + * @return
> > + *   - If the device index is valid (true) or not (false).
> > + */
> > +__rte_internal
> > +bool
> > +rte_dmadev_is_valid_dev(uint16_t dev_id);
> > +
> > +/**
> > + * rte_dma_sg - can hold scatter DMA operation request
> > + */
> > +struct rte_dma_sg {
> > +     rte_iova_t src;
> > +     rte_iova_t dst;
> > +     uint32_t length;
> > +};
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Get the total number of DMA devices that have been successfully
> > + * initialised.
> > + *
> > + * @return
> > + *   The total number of usable DMA devices.
> > + */
> > +__rte_experimental
> > +uint16_t
> > +rte_dmadev_count(void);
> > +
> > +/**
> > + * The capabilities of a DMA device
> > + */
> > +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM  (1ull << 0)
> > +/**< DMA device support mem-to-mem transfer.
>
> Do we need this? Can we assume that any device appearing as a dmadev can
> do mem-to-mem copies, and drop the capability for mem-to-mem and the
> capability for copying?
>
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_MEM_TO_DEV  (1ull << 1)
> > +/**< DMA device support slave mode & mem-to-dev transfer.
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_DEV_TO_MEM  (1ull << 2)
> > +/**< DMA device support slave mode & dev-to-mem transfer.
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_DEV_TO_DEV  (1ull << 3)
> > +/**< DMA device support slave mode & dev-to-dev transfer.
> > + *
>
> Just to confirm, are there devices currently planned for dmadev that

We are planning to use this support as our existing raw driver has this.

> support only a subset of these flags? Thinking particularly of the
> dev-2-mem and mem-2-dev ones here - do any of the devices we are
> considering not support using device memory?
> [Again, just want to ensure we aren't adding too much stuff that we don't
> need yet]
>
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_OPS_COPY    (1ull << 4)
> > +/**< DMA device support copy ops.
> > + *
>
> Suggest dropping this and making copy support a minimum requirement for dmadev.
>
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_OPS_FILL    (1ull << 5)
> > +/**< DMA device support fill ops.
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_OPS_SG              (1ull << 6)
> > +/**< DMA device support scatter-list ops.
> > + * If device support ops_copy and ops_sg, it means supporting copy_sg ops.
> > + * If device support ops_fill and ops_sg, it means supporting fill_sg ops.
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_FENCE               (1ull << 7)
> > +/**< DMA device support fence.
> > + * If device support fence, then application could set a fence flags when
> > + * enqueue operation by rte_dma_copy/copy_sg/fill/fill_sg.
> > + * If a operation has a fence flags, it means the operation must be processed
> > + * only after all previous operations are completed.
> > + *
>
> Is this needed? As I understand it, the Marvell driver doesn't require
> fences so providing one is a no-op. Therefore, this flag is probably
> unnecessary.

+1

>
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_SVA         (1ull << 8)
> > +/**< DMA device support SVA which could use VA as DMA address.
> > + * If device support SVA then application could pass any VA address like memory
> > + * from rte_malloc(), rte_memzone(), malloc, stack memory.
> > + * If device don't support SVA, then application should pass IOVA address which
> > + * from rte_malloc(), rte_memzone().
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_MT_VCHAN    (1ull << 9)
> > +/**< DMA device support MT-safe of a virtual DMA channel.
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> > +#define RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN      (1ull << 10)
> > +/**< DMA device support MT-safe of different virtual DMA channels.
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
>
> As with comments above - let's check that these will actually be used
> before we add them.
>
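As a data point on how an application would consume these: the capability
bits would typically be probed once at setup time, roughly as in the sketch
below (using the v2 names, several of which may be dropped per the comments
above; app_check_dma_caps is a made-up helper name):

	static int
	app_check_dma_caps(uint16_t dev_id, bool *use_va)
	{
		struct rte_dmadev_info info;

		if (rte_dmadev_info_get(dev_id, &info) != 0)
			return -1;
		/* without SVA the app must pass IOVAs, e.g. from
		 * rte_malloc_virt2iova(); with SVA plain VAs are fine */
		*use_va = (info.dev_capa & RTE_DMA_DEV_CAPA_SVA) != 0;
		if ((info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM) == 0)
			return -1; /* this app only does mem-to-mem copy */
		return 0;
	}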
> > +
> > +/**
> > + * A structure used to retrieve the contextual information of
> > + * an DMA device
> > + */
> > +struct rte_dmadev_info {
> > +     struct rte_device *device; /**< Generic Device information */
> > +     uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_) */
> > +     /** Maximum number of virtual DMA channels supported */
> > +     uint16_t max_vchans;
> > +     /** Maximum allowed number of virtual DMA channel descriptors */
> > +     uint16_t max_desc;
> > +     /** Minimum allowed number of virtual DMA channel descriptors */
> > +     uint16_t min_desc;
> > +     uint16_t nb_vchans; /**< Number of virtual DMA channel configured */
> > +};
>
> Let's add rte_dmadev_conf struct into this to return the configuration
> settings.
>
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Retrieve the contextual information of a DMA device.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param[out] dev_info
> > + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> > + *   contextual information of the device.
> > + *
> > + * @return
> > + *   - =0: Success, driver updates the contextual information of the DMA device
> > + *   - <0: Error code returned by the driver info get function.
> > + *
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> > +
>
> Should have "const" on second param.
>
> > +/**
> > + * A structure used to configure a DMA device.
> > + */
> > +struct rte_dmadev_conf {
> > +     /** Maximum number of virtual DMA channel to use.
> > +      * This value cannot be greater than the field 'max_vchans' of struct
> > +      * rte_dmadev_info which get from rte_dmadev_info_get().
> > +      */
> > +     uint16_t max_vchans;
> > +     /** Enable bit for MT-safe of a virtual DMA channel.
> > +      * This bit can be enabled only when the device supports
> > +      * RTE_DMA_DEV_CAPA_MT_VCHAN.
> > +      * @see RTE_DMA_DEV_CAPA_MT_VCHAN
> > +      */
> > +     uint8_t enable_mt_vchan : 1;
> > +     /** Enable bit for MT-safe of different virtual DMA channels.
> > +      * This bit can be enabled only when the device supports
> > +      * RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN.
> > +      * @see RTE_DMA_DEV_CAPA_MT_MULTI_VCHAN
> > +      */
> > +     uint8_t enable_mt_multi_vchan : 1;
> > +     uint64_t reserved[2]; /**< Reserved for future fields */
> > +};
>
> Drop the reserved fields. ABI versioning is a better way to deal with
> adding new fields.

+1

>
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Configure a DMA device.
> > + *
> > + * This function must be invoked first before any other function in the
> > + * API. This function can also be re-invoked when a device is in the
> > + * stopped state.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device to configure.
> > + * @param dev_conf
> > + *   The DMA device configuration structure encapsulated into rte_dmadev_conf
> > + *   object.
> > + *
> > + * @return
> > + *   - =0: Success, device configured.
> > + *   - <0: Error code returned by the driver configuration function.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Start a DMA device.
> > + *
> > + * The device start step is the last one and consists of setting the DMA
> > + * to start accepting jobs.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + *
> > + * @return
> > + *   - =0: Success, device started.
> > + *   - <0: Error code returned by the driver start function.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_start(uint16_t dev_id);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Stop a DMA device.
> > + *
> > + * The device can be restarted with a call to rte_dmadev_start()
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + *
> > + * @return
> > + *   - =0: Success, device stopped.
> > + *   - <0: Error code returned by the driver stop function.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_stop(uint16_t dev_id);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Close a DMA device.
> > + *
> > + * The device cannot be restarted after this call.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + *
> > + * @return
> > + *  - =0: Successfully close device
> > + *  - <0: Failure to close device
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_close(uint16_t dev_id);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Reset a DMA device.
> > + *
> > + * This is different from cycle of rte_dmadev_start->rte_dmadev_stop in the
> > + * sense similar to hard or soft reset.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + *
> > + * @return
> > + *   - =0: Successfully reset device.
> > + *   - <0: Failure to reset device.
> > + *   - (-ENOTSUP): If the device doesn't support this function.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_reset(uint16_t dev_id);
> > +
> > +/**
> > + * DMA transfer direction defines.
> > + */
> > +#define RTE_DMA_MEM_TO_MEM   (1ull << 0)
> > +/**< DMA transfer direction - from memory to memory.
> > + *
> > + * @see struct rte_dmadev_vchan_conf::direction
> > + */
> > +#define RTE_DMA_MEM_TO_DEV   (1ull << 1)
> > +/**< DMA transfer direction - slave mode & from memory to device.
> > + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> > + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> > + * request from ARM memory to x86 host memory.
>
> For clarity, it would be good to specify in the scenario described which
> memory is the "mem" and which is the "dev" (I assume SoC memory is "mem"
> and x86 host memory is "dev"??)
>
> > + *
> > + * @see struct rte_dmadev_vchan_conf::direction
> > + */
> > +#define RTE_DMA_DEV_TO_MEM   (1ull << 2)
> > +/**< DMA transfer direction - slave mode & from device to memory.
> > + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> > + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> > + * request from x86 host memory to ARM memory.
> > + *
> > + * @see struct rte_dmadev_vchan_conf::direction
> > + */
> > +#define RTE_DMA_DEV_TO_DEV   (1ull << 3)
> > +/**< DMA transfer direction - slave mode & from device to device.
> > + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs. In
> > + * this case, the ARM SoCs works in slave mode, it could initiate a DMA move
> > + * request from x86 host memory to another x86 host memory.
> > + *
> > + * @see struct rte_dmadev_vchan_conf::direction
> > + */
> > +#define RTE_DMA_TRANSFER_DIR_ALL     (RTE_DMA_MEM_TO_MEM | \
> > +                                      RTE_DMA_MEM_TO_DEV | \
> > +                                      RTE_DMA_DEV_TO_MEM | \
> > +                                      RTE_DMA_DEV_TO_DEV)
> > +
> > +/**
> > + * enum rte_dma_slave_port_type - slave mode type defines
> > + */
> > +enum rte_dma_slave_port_type {
> > +     /** The slave port is PCIE. */
> > +     RTE_DMA_SLAVE_PORT_PCIE = 1,
> > +};
> > +
>
> As previously mentioned, this needs to be updated to use other terms.
> For some suggested alternatives see:
> https://doc.dpdk.org/guides-21.05/contributing/coding_style.html#naming
>
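For context, setting up a mem-to-dev virtual channel with these fields would
look roughly like the sketch below (pre-rename v2 names, placeholder PF/VF
values):

	struct rte_dmadev_vchan_conf conf = {
		.direction = RTE_DMA_MEM_TO_DEV,
		.nb_desc = 1024,
		.port = {
			.port_type = RTE_DMA_SLAVE_PORT_PCIE,
			.pf_number = 0,
			.vf_enable = 1,
			.vf_number = 2,
		},
	};
	int vchan = rte_dmadev_vchan_setup(dev_id, &conf);

	if (vchan < 0)
		rte_exit(EXIT_FAILURE, "vchan setup failed\n");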
> > +/**
> > + * A structure used to descript slave port parameters.
> > + */
> > +struct rte_dma_slave_port_parameters {
> > +     enum rte_dma_slave_port_type port_type;
> > +     union {
> > +             /** For PCIE port */
> > +             struct {
> > +                     /** The physical function number which to use */
> > +                     uint64_t pf_number : 6;
> > +                     /** Virtual function enable bit */
> > +                     uint64_t vf_enable : 1;
> > +                     /** The virtual function number which to use */
> > +                     uint64_t vf_number : 8;
> > +                     uint64_t pasid : 20;
> > +                     /** The attributes filed in TLP packet */
> > +                     uint64_t tlp_attr : 3;
> > +             };
> > +     };
> > +};
> > +
> > +/**
> > + * A structure used to configure a virtual DMA channel.
> > + */
> > +struct rte_dmadev_vchan_conf {
> > +     uint8_t direction; /**< Set of supported transfer directions */
> > +     /** Number of descriptor for the virtual DMA channel */
> > +     uint16_t nb_desc;
> > +     /** 1) Used to describes the dev parameter in the mem-to-dev/dev-to-mem
> > +      * transfer scenario.
> > +      * 2) Used to describes the src dev parameter in the dev-to-dev
> > +      * transfer scenario.
> > +      */
> > +     struct rte_dma_slave_port_parameters port;
> > +     /** Used to describes the dst dev parameters in the dev-to-dev
> > +      * transfer scenario.
> > +      */
> > +     struct rte_dma_slave_port_parameters peer_port;
> > +     uint64_t reserved[2]; /**< Reserved for future fields */
> > +};
>
> Let's drop the reserved fields and use ABI versioning if necessary in
> future.
>
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Allocate and set up a virtual DMA channel.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param conf
> > + *   The virtual DMA channel configuration structure encapsulated into
> > + *   rte_dmadev_vchan_conf object.
> > + *
> > + * @return
> > + *   - >=0: Allocate success, it is the virtual DMA channel id. This value must
> > + *          be less than the field 'max_vchans' of struct rte_dmadev_conf
> > +         which configured by rte_dmadev_configure().
>
> nit: whitespace error here.
>
> > + *   - <0: Error code returned by the driver virtual channel setup function.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_vchan_setup(uint16_t dev_id,
> > +                    const struct rte_dmadev_vchan_conf *conf);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Release a virtual DMA channel.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel which return by vchan setup.
> > + *
> > + * @return
> > + *   - =0: Successfully release the virtual DMA channel.
> > + *   - <0: Error code returned by the driver virtual channel release function.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
> > +
> > +/**
> > + * rte_dmadev_stats - running statistics.
> > + */
> > +struct rte_dmadev_stats {
> > +     /** Count of operations which were successfully enqueued */
> > +     uint64_t enqueued_count;
> > +     /** Count of operations which were submitted to hardware */
> > +     uint64_t submitted_count;
> > +     /** Count of operations which failed to complete */
> > +     uint64_t completed_fail_count;
> > +     /** Count of operations which successfully complete */
> > +     uint64_t completed_count;
> > +     uint64_t reserved[4]; /**< Reserved for future fields */
> > +};
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Retrieve basic statistics of a or all virtual DMA channel(s).
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel, -1 means all channels.
> > + * @param[out] stats
> > + *   The basic statistics structure encapsulated into rte_dmadev_stats
> > + *   object.
> > + *
> > + * @return
> > + *   - =0: Successfully retrieve stats.
> > + *   - <0: Failure to retrieve stats.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_stats_get(uint16_t dev_id, int vchan,
>
> vchan as uint16_t rather than int, I think. This would apply to all
> dataplane functions. There is no need for a signed vchan value.
>
> > +                  struct rte_dmadev_stats *stats);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Reset basic statistics of a or all virtual DMA channel(s).
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel, -1 means all channels.
> > + *
> > + * @return
> > + *   - =0: Successfully reset stats.
> > + *   - <0: Failure to reset stats.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_stats_reset(uint16_t dev_id, int vchan);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Dump DMA device info.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param f
> > + *   The file to write the output to.
> > + *
> > + * @return
> > + *   0 on success. Non-zero otherwise.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_dump(uint16_t dev_id, FILE *f);
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Trigger the dmadev self test.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + *
> > + * @return
> > + *   - 0: Selftest successful.
> > + *   - -ENOTSUP if the device doesn't support selftest
> > + *   - other values < 0 on failure.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_selftest(uint16_t dev_id);
>
> I don't think this needs to be in the public API, since it should only be
> for the autotest app to use. Maybe move the prototype to the _pmd.h (since
> we don't have a separate internal header), and then the autotest app can
> pick it up from there.
>
> > +
> > +#include "rte_dmadev_core.h"
> > +
> > +/**
> > + *  DMA flags to augment operation preparation.
> > + *  Used as the 'flags' parameter of rte_dmadev_copy/copy_sg/fill/fill_sg.
> > + */
> > +#define RTE_DMA_FLAG_FENCE   (1ull << 0)
> > +/**< DMA fence flag
> > + * It means the operation with this flag must be processed only after all
> > + * previous operations are completed.
> > + *
> > + * @see rte_dmadev_copy()
> > + * @see rte_dmadev_copy_sg()
> > + * @see rte_dmadev_fill()
> > + * @see rte_dmadev_fill_sg()
> > + */
>
> As a general comment, I think all these multi-line comments should go
> before the item they describe. Comments after should only be used in the
> case where the comment fits on the rest of the line after a value.
>
> We also should define the SUBMIT flag as suggested by Jerin, to allow apps
> to automatically submit jobs after enqueue.
>
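To make that concrete, the enqueue path with both flags might read as in the
sketch below (RTE_DMA_FLAG_SUBMIT is the flag suggested above and does not
exist in this version of the patch):

	/* job 2 must not start before job 1 completes, so fence it */
	rte_dmadev_copy(dev_id, vchan, src1, dst1, len1, 0);
	rte_dmadev_copy(dev_id, vchan, src2, dst2, len2, RTE_DMA_FLAG_FENCE);
	rte_dmadev_submit(dev_id, vchan); /* one doorbell for the batch */

	/* with the proposed flag, the doorbell folds into the last enqueue:
	 * rte_dmadev_copy(dev_id, vchan, src2, dst2, len2,
	 *		   RTE_DMA_FLAG_FENCE | RTE_DMA_FLAG_SUBMIT);
	 */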
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Enqueue a copy operation onto the virtual DMA channel.
> > + *
> > + * This queues up a copy operation to be performed by hardware, but does not
> > + * trigger hardware to begin that operation.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param src
> > + *   The address of the source buffer.
> > + * @param dst
> > + *   The address of the destination buffer.
> > + * @param length
> > + *   The length of the data to be copied.
> > + * @param flags
> > + *   An flags for this operation.
> > + *
> > + * @return
> > + *   - 0..UINT16_MAX: index of enqueued copy job.
> > + *   - <0: Error code returned by the driver copy function.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> > +             uint32_t length, uint64_t flags)
> > +{
> > +     struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +#ifdef RTE_DMADEV_DEBUG
> > +     RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> > +     if (vchan >= dev->data->dev_conf.max_vchans) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > +             return -EINVAL;
> > +     }
> > +#endif
> > +     return (*dev->copy)(dev, vchan, src, dst, length, flags);
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> > + *
> > + * This queues up a scatter list copy operation to be performed by hardware,
> > + * but does not trigger hardware to begin that operation.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param sg
> > + *   The pointer of scatterlist.
> > + * @param sg_len
> > + *   The number of scatterlist elements.
> > + * @param flags
> > + *   An flags for this operation.
> > + *
> > + * @return
> > + *   - 0..UINT16_MAX: index of enqueued copy job.
> > + *   - <0: Error code returned by the driver copy function.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
> > +                uint32_t sg_len, uint64_t flags)
> > +{
> > +     struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +#ifdef RTE_DMADEV_DEBUG
> > +     RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(sg, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> > +     if (vchan >= dev->data->dev_conf.max_vchans) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > +             return -EINVAL;
> > +     }
> > +#endif
> > +     return (*dev->copy_sg)(dev, vchan, sg, sg_len, flags);
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Enqueue a fill operation onto the virtual DMA channel.
> > + *
> > + * This queues up a fill operation to be performed by hardware, but does not
> > + * trigger hardware to begin that operation.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param pattern
> > + *   The pattern to populate the destination buffer with.
> > + * @param dst
> > + *   The address of the destination buffer.
> > + * @param length
> > + *   The length of the destination buffer.
> > + * @param flags
> > + *   An flags for this operation.
> > + *
> > + * @return
> > + *   - 0..UINT16_MAX: index of enqueued copy job.
> > + *   - <0: Error code returned by the driver copy function.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> > +             rte_iova_t dst, uint32_t length, uint64_t flags)
> > +{
> > +     struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +#ifdef RTE_DMADEV_DEBUG
> > +     RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> > +     if (vchan >= dev->data->dev_conf.max_vchans) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > +             return -EINVAL;
> > +     }
> > +#endif
> > +     return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Enqueue a scatter list fill operation onto the virtual DMA channel.
> > + *
> > + * This queues up a scatter list fill operation to be performed by hardware,
> > + * but does not trigger hardware to begin that operation.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param pattern
> > + *   The pattern to populate the destination buffer with.
> > + * @param sg
> > + *   The pointer of scatterlist.
> > + * @param sg_len
> > + *   The number of scatterlist elements.
> > + * @param flags
> > + *   An flags for this operation.
> > + *
> > + * @return
> > + *   - 0..UINT16_MAX: index of enqueued copy job.
> > + *   - <0: Error code returned by the driver copy function.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_fill_sg(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> > +                const struct rte_dma_sg *sg, uint32_t sg_len,
> > +                uint64_t flags)
> > +{
> > +     struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +#ifdef RTE_DMADEV_DEBUG
> > +     RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(sg, -ENOTSUP);
> > +     RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> > +     if (vchan >= dev->data->dev_conf.max_vchans) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > +             return -EINVAL;
> > +     }
> > +#endif
> > +     return (*dev->fill_sg)(dev, vchan, pattern, sg, sg_len, flags);
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Trigger hardware to begin performing enqueued operations.
> > + *
> > + * This API is used to write the "doorbell" to the hardware to trigger it
> > + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + *
> > + * @return
> > + *   - =0: Successfully trigger hardware.
> > + *   - <0: Failure to trigger hardware.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
> > +{
> > +     struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +#ifdef RTE_DMADEV_DEBUG
> > +     RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
> > +     if (vchan >= dev->data->dev_conf.max_vchans) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > +             return -EINVAL;
> > +     }
> > +#endif
> > +     return (*dev->submit)(dev, vchan);
> > +}
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Returns the number of operations that have been successfully completed.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param nb_cpls
> > + *   The maximum number of completed operations that can be processed.
> > + * @param[out] last_idx
> > + *   The last completed operation's index.
> > + *   If not required, NULL can be passed in.
> > + * @param[out] has_error
> > + *   Indicates if there are transfer error.
> > + *   If not required, NULL can be passed in.
> > + *
> > + * @return
> > + *   The number of operations that successfully completed.
> > + */
> > +__rte_experimental
> > +static inline uint16_t
> > +rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> > +                  uint16_t *last_idx, bool *has_error)
> > +{
> > +     struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +     uint16_t idx;
> > +     bool err;
> > +
> > +#ifdef RTE_DMADEV_DEBUG
> > +     RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, -ENOTSUP);
> > +     if (vchan >= dev->data->dev_conf.max_vchans) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > +             return -EINVAL;
> > +     }
> > +     if (nb_cpls == 0) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid nb_cpls\n");
> > +             return -EINVAL;
> > +     }
> > +#endif
> > +
> > +     /* Ensure the pointer values are non-null to simplify drivers.
> > +      * In most cases these should be compile time evaluated, since this is
> > +      * an inline function.
> > +      * - If NULL is explicitly passed as parameter, then compiler knows the
> > +      *   value is NULL
> > +      * - If address of local variable is passed as parameter, then compiler
> > +      *   can know it's non-NULL.
> > +      */
> > +     if (last_idx == NULL)
> > +             last_idx = &idx;
> > +     if (has_error == NULL)
> > +             has_error = &err;
> > +
> > +     *has_error = false;
> > +     return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> > +}
> > +
> > +/**
> > + * DMA transfer status code defines
> > + */
> > +enum rte_dma_status_code {
> > +     /** The operation completed successfully */
> > +     RTE_DMA_STATUS_SUCCESSFUL = 0,
> > +     /** The operation failed to complete due active drop
> > +      * This is mainly used when processing dev_stop, allow outstanding
> > +      * requests to be completed as much as possible.
> > +      */
> > +     RTE_DMA_STATUS_ACTIVE_DROP,
> > +     /** The operation failed to complete due invalid source address */
> > +     RTE_DMA_STATUS_INVALID_SRC_ADDR,
> > +     /** The operation failed to complete due invalid destination address */
> > +     RTE_DMA_STATUS_INVALID_DST_ADDR,
> > +     /** The operation failed to complete due invalid length */
> > +     RTE_DMA_STATUS_INVALID_LENGTH,
> > +     /** The operation failed to complete due invalid opcode
> > +      * The DMA descriptor could have multiple format, which are
> > +      * distinguished by the opcode field.
> > +      */
> > +     RTE_DMA_STATUS_INVALID_OPCODE,
> > +     /** The operation failed to complete due bus err */
> > +     RTE_DMA_STATUS_BUS_ERROR,
> > +     /** The operation failed to complete due data poison */
> > +     RTE_DMA_STATUS_DATA_POISION,
> > +     /** The operation failed to complete due descriptor read error */
> > +     RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
> > +     /** The operation failed to complete due device link error
> > +      * Used to indicates that the link error in the mem-to-dev/dev-to-mem/
> > +      * dev-to-dev transfer scenario.
> > +      */
> > +     RTE_DMA_STATUS_DEV_LINK_ERROR,
> > +     /** Driver specific status code offset
> > +      * Start status code for the driver to define its own error code.
> > +      */
> > +     RTE_DMA_STATUS_DRV_SPECIFIC_OFFSET = 0x10000,
> > +};
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Returns the number of operations that failed to complete.
> > + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param nb_status
> > + *   Indicates the size of status array.
> > + * @param[out] status
> > + *   The error code of operations that failed to complete.
> > + *   Some standard error code are described in 'enum rte_dma_status_code'
> > + *   @see rte_dma_status_code
> > + * @param[out] last_idx
> > + *   The last failed completed operation's index.
> > + *
> > + * @return
> > + *   The number of operations that failed to complete.
> > + */
> > +__rte_experimental
> > +static inline uint16_t
> > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
> > +                        const uint16_t nb_status, uint32_t *status,
> > +                        uint16_t *last_idx)
> > +{
> > +     struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +#ifdef RTE_DMADEV_DEBUG
> > +     RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(status, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(last_idx, -EINVAL);
> > +     RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_fails, -ENOTSUP);
> > +     if (vchan >= dev->data->dev_conf.max_vchans) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> > +             return -EINVAL;
> > +     }
> > +     if (nb_status == 0) {
> > +             RTE_DMADEV_LOG(ERR, "Invalid nb_status\n");
> > +             return -EINVAL;
> > +     }
> > +#endif
> > +     return (*dev->completed_fails)(dev, vchan, nb_status, status, last_idx);
> > +}
> > +
> > +#ifdef __cplusplus
> > +}
> > +#endif
> > +
> > +#endif /* _RTE_DMADEV_H_ */
> > diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> > new file mode 100644
> > index 0000000..410faf0
> > --- /dev/null
> > +++ b/lib/dmadev/rte_dmadev_core.h
> > @@ -0,0 +1,159 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2021 HiSilicon Limited.
> > + * Copyright(c) 2021 Intel Corporation.
> > + */
> > +
> > +#ifndef _RTE_DMADEV_CORE_H_
> > +#define _RTE_DMADEV_CORE_H_
> > +
> > +/**
> > + * @file
> > + *
> > + * RTE DMA Device internal header.
> > + *
> > + * This header contains internal data types, that are used by the DMA devices
> > + * in order to expose their ops to the class.
> > + *
> > + * Applications should not use these API directly.
> > + *
> > + */
> > +
> > +struct rte_dmadev;
> > +
> > +/** @internal Used to get device information of a device. */
> > +typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
> > +                              struct rte_dmadev_info *dev_info);
>
> First parameter can be "const"
>
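Presumably something like (sketch):

	typedef int (*dmadev_info_get_t)(const struct rte_dmadev *dev,
					 struct rte_dmadev_info *dev_info);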
> > +/** @internal Used to configure a device. */
> > +typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
> > +                               const struct rte_dmadev_conf *dev_conf);
> > +
> > +/** @internal Used to start a configured device. */
> > +typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
> > +
> > +/** @internal Used to stop a configured device. */
> > +typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
> > +
> > +/** @internal Used to close a configured device. */
> > +typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
> > +
> > +/** @internal Used to reset a configured device. */
> > +typedef int (*dmadev_reset_t)(struct rte_dmadev *dev);
> > +
> > +/** @internal Used to allocate and set up a virtual DMA channel. */
> > +typedef int (*dmadev_vchan_setup_t)(struct rte_dmadev *dev,
> > +                                 const struct rte_dmadev_vchan_conf *conf);
> > +
> > +/** @internal Used to release a virtual DMA channel. */
> > +typedef int (*dmadev_vchan_release_t)(struct rte_dmadev *dev, uint16_t vchan);
> > +
> > +/** @internal Used to retrieve basic statistics. */
> > +typedef int (*dmadev_stats_get_t)(struct rte_dmadev *dev, int vchan,
> > +                               struct rte_dmadev_stats *stats);
>
> First parameter can be "const"
>
> > +
> > +/** @internal Used to reset basic statistics. */
> > +typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, int vchan);
> > +
> > +/** @internal Used to dump internal information. */
> > +typedef int (*dmadev_dump_t)(struct rte_dmadev *dev, FILE *f);
> > +
>
> First param "const"
>
> > +/** @internal Used to start dmadev selftest. */
> > +typedef int (*dmadev_selftest_t)(uint16_t dev_id);
> > +
>
> This looks like an outlier taking a dev_id. It should take a dmadev parameter.
> Most drivers should not need to implement this anyway, as the main unit
> tests should be in "test_dmadev.c" in the autotest app.
>
> > +/** @internal Used to enqueue a copy operation. */
> > +typedef int (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
> > +                          rte_iova_t src, rte_iova_t dst,
> > +                          uint32_t length, uint64_t flags);
> > +
> > +/** @internal Used to enqueue a scatter list copy operation. */
> > +typedef int (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
> > +                             const struct rte_dma_sg *sg,
> > +                             uint32_t sg_len, uint64_t flags);
> > +
> > +/** @internal Used to enqueue a fill operation. */
> > +typedef int (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
> > +                          uint64_t pattern, rte_iova_t dst,
> > +                          uint32_t length, uint64_t flags);
> > +
> > +/** @internal Used to enqueue a scatter list fill operation. */
> > +typedef int (*dmadev_fill_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
> > +                     uint64_t pattern, const struct rte_dma_sg *sg,
> > +                     uint32_t sg_len, uint64_t flags);
> > +
> > +/** @internal Used to trigger hardware to begin working. */
> > +typedef int (*dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
> > +
> > +/** @internal Used to return number of successful completed operations. */
> > +typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vchan,
> > +                                    const uint16_t nb_cpls,
> > +                                    uint16_t *last_idx, bool *has_error);
> > +
> > +/** @internal Used to return number of failed completed operations. */
> > +typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
> > +                     uint16_t vchan, const uint16_t nb_status,
> > +                     uint32_t *status, uint16_t *last_idx);
> > +
> > +/**
> > + * DMA device operations function pointer table
> > + */
> > +struct rte_dmadev_ops {
> > +     dmadev_info_get_t dev_info_get;
> > +     dmadev_configure_t dev_configure;
> > +     dmadev_start_t dev_start;
> > +     dmadev_stop_t dev_stop;
> > +     dmadev_close_t dev_close;
> > +     dmadev_reset_t dev_reset;
> > +     dmadev_vchan_setup_t vchan_setup;
> > +     dmadev_vchan_release_t vchan_release;
> > +     dmadev_stats_get_t stats_get;
> > +     dmadev_stats_reset_t stats_reset;
> > +     dmadev_dump_t dev_dump;
> > +     dmadev_selftest_t dev_selftest;
> > +};
> > +
> > +/**
> > + * @internal
> > + * The data part, with no function pointers, associated with each DMA device.
> > + *
> > + * This structure is safe to place in shared memory to be common among different
> > + * processes in a multi-process configuration.
> > + */
> > +struct rte_dmadev_data {
> > +     uint16_t dev_id; /**< Device [external] identifier. */
> > +     char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
> > +     void *dev_private; /**< PMD-specific private data. */
> > +     struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
> > +     uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
> > +     uint64_t reserved[4]; /**< Reserved for future fields */
> > +} __rte_cache_aligned;
> > +
>
> While I generally don't like having reserved space, this is one place where
> it makes sense, so +1 for it here.
>
> > +/**
> > + * @internal
> > + * The generic data structure associated with each DMA device.
> > + *
> > + * The dataplane APIs are located at the beginning of the structure, along
> > + * with the pointer to where all the data elements for the particular device
> > + * are stored in shared memory. This split scheme allows the function pointer
> > + * and driver data to be per-process, while the actual configuration data for
> > + * the device is shared.
> > + */
> > +struct rte_dmadev {
> > +     dmadev_copy_t copy;
> > +     dmadev_copy_sg_t copy_sg;
> > +     dmadev_fill_t fill;
> > +     dmadev_fill_sg_t fill_sg;
> > +     dmadev_submit_t submit;
> > +     dmadev_completed_t completed;
> > +     dmadev_completed_fails_t completed_fails;
> > +     const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
> > +     /** Flag indicating the device is attached: ATTACHED(1)/DETACHED(0). */
> > +     uint8_t attached : 1;
>
> Since it's in the midst of a series of pointers, this 1-bit flag is
> actually using 8 bytes of space. Is it needed? Can we use dev_ops == NULL
> or data == NULL instead to indicate this is a valid entry?
>
> > +     /** Device info which supplied during device initialization. */
> > +     struct rte_device *device;
> > +     struct rte_dmadev_data *data; /**< Pointer to device data. */
>
> If we are to try and minimise cacheline access, we should put this data
> pointer - or even better a copy of data->private pointer - at the top of
> the structure on the same cacheline as datapath operations. For dataplane,
> I can't see any elements of data, except the private pointer being
> accessed, so we would probably get the most benefit from having a copy put there
> on init of the dmadev struct.
>
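A rough sketch of that suggested layout (illustrative only, reduced field
set):

	struct rte_dmadev {
		/* hot per-process dataplane pointers, first cacheline */
		void *dev_private; /* copy of data->dev_private, set at init */
		dmadev_copy_t copy;
		dmadev_submit_t submit;
		dmadev_completed_t completed;
		/* colder control-path state follows */
		const struct rte_dmadev_ops *dev_ops;
		struct rte_dmadev_data *data;
		struct rte_device *device;
	} __rte_cache_aligned;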
> > +     uint64_t reserved[4]; /**< Reserved for future fields */
> > +} __rte_cache_aligned;
> > +
> > +extern struct rte_dmadev rte_dmadevices[];
> > +
> > +#endif /* _RTE_DMADEV_CORE_H_ */
> > diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
> > new file mode 100644
> > index 0000000..45141f9
> > --- /dev/null
> > +++ b/lib/dmadev/rte_dmadev_pmd.h
> > @@ -0,0 +1,72 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2021 HiSilicon Limited.
> > + */
> > +
> > +#ifndef _RTE_DMADEV_PMD_H_
> > +#define _RTE_DMADEV_PMD_H_
> > +
> > +/**
> > + * @file
> > + *
> > + * RTE DMA Device PMD APIs
> > + *
> > + * Driver facing APIs for a DMA device. These are not to be called directly by
> > + * any application.
> > + */
> > +
> > +#include "rte_dmadev.h"
> > +
> > +#ifdef __cplusplus
> > +extern "C" {
> > +#endif
> > +
> > +/**
> > + * @internal
> > + * Allocates a new dmadev slot for an DMA device and returns the pointer
> > + * to that slot for the driver to use.
> > + *
> > + * @param name
> > + *   DMA device name.
> > + *
> > + * @return
> > + *   A pointer to the DMA device slot case of success,
> > + *   NULL otherwise.
> > + */
> > +__rte_internal
> > +struct rte_dmadev *
> > +rte_dmadev_pmd_allocate(const char *name);
> > +
> > +/**
> > + * @internal
> > + * Release the specified dmadev.
> > + *
> > + * @param dev
> > + *   Device to be released.
> > + *
> > + * @return
> > + *   - 0 on success, negative on error
> > + */
> > +__rte_internal
> > +int
> > +rte_dmadev_pmd_release(struct rte_dmadev *dev);
> > +
> > +/**
> > + * @internal
> > + * Return the DMA device based on the device name.
> > + *
> > + * @param name
> > + *   DMA device name.
> > + *
> > + * @return
> > + *   A pointer to the DMA device slot case of success,
> > + *   NULL otherwise.
> > + */
> > +__rte_internal
> > +struct rte_dmadev *
> > +rte_dmadev_get_device_by_name(const char *name);
> > +
> > +#ifdef __cplusplus
> > +}
> > +#endif
> > +
> > +#endif /* _RTE_DMADEV_PMD_H_ */
> > diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
> > new file mode 100644
> > index 0000000..0f099e7
> > --- /dev/null
> > +++ b/lib/dmadev/version.map
> > @@ -0,0 +1,40 @@
> > +EXPERIMENTAL {
> > +     global:
> > +
> > +     rte_dmadev_count;
> > +     rte_dmadev_info_get;
> > +     rte_dmadev_configure;
> > +     rte_dmadev_start;
> > +     rte_dmadev_stop;
> > +     rte_dmadev_close;
> > +     rte_dmadev_reset;
> > +     rte_dmadev_vchan_setup;
> > +     rte_dmadev_vchan_release;
> > +     rte_dmadev_stats_get;
> > +     rte_dmadev_stats_reset;
> > +     rte_dmadev_dump;
> > +     rte_dmadev_selftest;
> > +     rte_dmadev_copy;
> > +     rte_dmadev_copy_sg;
> > +     rte_dmadev_fill;
> > +     rte_dmadev_fill_sg;
> > +     rte_dmadev_submit;
> > +     rte_dmadev_completed;
> > +     rte_dmadev_completed_fails;
> > +
> > +     local: *;
> > +};
>
> The elements in the version.map file blocks should be sorted alphabetically.
>
> > +
> > +INTERNAL {
> > +        global:
> > +
> > +     rte_dmadevices;
> > +     rte_dmadev_pmd_allocate;
> > +     rte_dmadev_pmd_release;
> > +     rte_dmadev_get_device_by_name;
> > +
> > +     local:
> > +
> > +     rte_dmadev_is_valid_dev;
> > +};
> > +
> > diff --git a/lib/meson.build b/lib/meson.build
> > index 1673ca4..68d239f 100644
> > --- a/lib/meson.build
> > +++ b/lib/meson.build
> > @@ -60,6 +60,7 @@ libraries = [
> >          'bpf',
> >          'graph',
> >          'node',
> > +        'dmadev',
> >  ]
> >
> >  if is_windows
> > --
> > 2.8.1
> >

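For readers following the thread, the end-to-end dataplane flow under review
is roughly the following (a sketch using the v2 names, which are still
subject to change; setup and variable initialisation omitted):

	uint16_t last_idx, n;
	bool has_error;

	rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
	rte_dmadev_submit(dev_id, vchan); /* ring the doorbell */

	do { /* NULL is allowed for last_idx/has_error if unused */
		n = rte_dmadev_completed(dev_id, vchan, 1,
					 &last_idx, &has_error);
	} while (n == 0 && !has_error);

	if (has_error) { /* inspect the failure via completed_fails */
		uint32_t status;
		rte_dmadev_completed_fails(dev_id, vchan, 1,
					   &status, &last_idx);
	}
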
^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (5 preceding siblings ...)
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
@ 2021-07-13 12:27 ` Chengwen Feng
  2021-07-13 13:06   ` fengchengwen
                     ` (4 more replies)
  2021-07-15 15:41 ` [dpdk-dev] [PATCH v4] " Chengwen Feng
                   ` (22 subsequent siblings)
  29 siblings, 5 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-13 12:27 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces 'dmadevice' which is a generic type of DMA
device.

The APIs of the dmadev library expose some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag (see the note after this list).
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved fields.
* rearrange the rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* reword some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
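
Note: with the submit flag, the doorbell can be folded into the final enqueue
instead of a separate rte_dmadev_submit() call, roughly as below (the exact
flag name is illustrative here; see the rte_dmadev.h hunk for the define):

	int ret = rte_dmadev_copy(dev_id, vchan, src, dst, len,
				  RTE_DMA_OP_FLAG_SUBMIT);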
---
 MAINTAINERS                  |   4 +
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   7 +
 lib/dmadev/rte_dmadev.c      | 561 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 968 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h | 161 +++++++
 lib/dmadev/rte_dmadev_pmd.h  |  72 ++++
 lib/dmadev/version.map       |  37 ++
 lib/meson.build              |   1 +
 9 files changed, 1814 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..1bca463
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,561 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER(rte_dmadev_logtype, lib.dmadev, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+#define RTE_DMADEV_VALID_DEV_ID_OR_RET(dev_id) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return; \
+	} \
+} while (0)
+
+/* Macro to check for invalid pointers */
+#define RTE_DMADEV_PTR_OR_ERR_RET(ptr, retval) do { \
+	if ((ptr) == NULL) \
+		return retval; \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
+			RTE_ASSERT(rte_dmadevices[i].state ==
+				   RTE_DMADEV_UNUSED);
+			return i;
+		}
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else {
+			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
+		}
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	strlcpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	RTE_ASSERT(dev->data->dev_id == i);
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rte_free(dev->data->dev_private);
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+	}
+
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	if (dev_id >= RTE_DMADEV_MAX_DEVS ||
+	    rte_dmadevices[dev_id].state != RTE_DMADEV_ATTACHED)
+		return false;
+	return true;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_DMADEV_PTR_OR_ERR_RET(dev_info, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev_info info;
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_DMADEV_PTR_OR_ERR_RET(dev_conf, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev_info info;
+	struct rte_dmadev *dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_DMADEV_PTR_OR_ERR_RET(conf, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == 0 ||
+	    conf->direction & ~RTE_DMA_TRANSFER_DIR_ALL) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_release, -ENOTSUP);
+	return (*dev->dev_ops->vchan_release)(dev, vchan);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_DMADEV_PTR_OR_ERR_RET(stats, -EINVAL);
+
+	dev = &rte_dmadevices[dev_id];
+
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev;
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_DMADEV_PTR_OR_ERR_RET(f, -EINVAL);
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	dev = &rte_dmadevices[dev_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
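
For illustration only (not part of the patch): the control-path functions above
are expected to be called in the configure -> vchan_setup -> start order
described in rte_dmadev.h. A minimal sketch, assuming a mem-to-mem capable
device and a descriptor count within the device's [min_desc, max_desc] range:

	/* editorial sketch: bring up one vchan on an already-probed device */
	static int
	setup_one_vchan(uint16_t dev_id)
	{
		struct rte_dmadev_conf conf = { .max_vchans = 1 };
		struct rte_dmadev_vchan_conf vconf = {
			.direction = RTE_DMA_MEM_TO_MEM,
			.nb_desc = 1024,	/* assumed valid for the device */
		};
		int vchan;

		if (rte_dmadev_configure(dev_id, &conf) != 0)
			return -1;
		vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
		if (vchan < 0 || rte_dmadev_start(dev_id) != 0)
			return -1;
		return vchan;	/* channel id to use on the datapath */
	}
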
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..f6cc4e5
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller may have multiple HW-DMA-channels (aka HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev may create multiple virtual DMA channels, each of which
+ * represents a different transfer context. DMA operation requests must be
+ * submitted to a virtual DMA channel.
+ * E.g. an application could create virtual DMA channel 0 for the mem-to-mem
+ *      transfer scenario, and virtual DMA channel 1 for the mem-to-dev
+ *      transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and may
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. call
+ * rte_dmadev_configure()), it must call rte_dmadev_stop() first to stop the
+ * device and then do the reconfiguration before calling rte_dmadev_start()
+ * again. The dataplane APIs should not be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ *   a) The first part is the submission of operation requests:
+ *        - rte_dmadev_copy()
+ *        - rte_dmadev_copy_sg() - scatter-gather form of copy
+ *        - rte_dmadev_fill()
+ *        - rte_dmadev_submit() - issue doorbell to hardware
+ *      These APIs may work with different virtual DMA channels which have
+ *      different contexts.
+ *      The first three APIs are used to submit an operation request to a
+ *      virtual DMA channel; if the submission is successful, a uint16_t
+ *      ring_idx is returned, otherwise a negative number is returned.
+ *   b) The second part is to obtain the result of requests:
+ *        - rte_dmadev_completed()
+ *            - return the number of operation requests completed successfully.
+ *        - rte_dmadev_completed_fails()
+ *            - return the number of operation requests that failed to complete.
+ *
+ * About the ring_idx which rte_dmadev_copy/copy_sg/fill() return,
+ * the rules are as follows:
+ *   a) ring_idx for each virtual DMA channel is independent.
+ *   b) For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *      when it reaches UINT16_MAX, it wraps back to zero.
+ *   c) This ring_idx can be used by applications to track per-operation
+ *      metadata in an application-defined circular ring.
+ *   d) The initial ring_idx of a virtual DMA channel is zero, after the device
+ *      is stopped, the ring_idx needs to be reset to zero.
+ *   Example:
+ *      step-1: start one dmadev
+ *      step-2: enqueue a copy operation, the ring_idx return is 0
+ *      step-3: enqueue a copy operation again, the ring_idx return is 1
+ *      ...
+ *      step-101: stop the dmadev
+ *      step-102: start the dmadev
+ *      step-103: enqueue a copy operation, the ring_idx return is 0
+ *      ...
+ *      step-x+0: enqueue a fill operation, the ring_idx return is 65535
+ *      step-x+1: enqueue a copy operation, the ring_idx return is 0
+ *      ...
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions, which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target object.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#ifdef RTE_DMADEV_DEBUG
+#include <rte_dev.h>
+#endif
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the device index refers to a valid DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/**
+ * The capabilities of a DMA device
+ */
+#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_OPS_COPY	(1ull << 4)
+/**< DMA device supports copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_OPS_FILL	(1ull << 5)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_OPS_SG		(1ull << 6)
+/**< DMA device supports scatter-gather list ops.
+ * If a device supports both OPS_COPY and OPS_SG, it supports the copy_sg op.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_FENCE		(1ull << 7)
+/**< DMA device supports fence.
+ * If a device supports fence, the application can set a fence flag when
+ * enqueuing an operation via rte_dmadev_copy/copy_sg/fill().
+ * If an operation has a fence flag, it means the operation must be processed
+ * only after all previous operations are completed.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+#define RTE_DMA_DEV_CAPA_SVA		(1ull << 8)
+/**< DMA device supports SVA, which allows using VAs as DMA addresses.
+ * If a device supports SVA, the application can pass any VA, e.g. memory
+ * from rte_malloc(), rte_memzone(), malloc, or the stack.
+ * If a device doesn't support SVA, the application must pass IOVA addresses
+ * obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the contextual information of
+ * a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_*) */
+	/** Maximum number of virtual DMA channels supported */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors */
+	uint16_t min_desc;
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve the contextual information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   contextual information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the contextual information of the DMA device
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	/** Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	uint16_t max_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start()
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ */
+#define RTE_DMA_MEM_TO_MEM	(1ull << 0)
+/**< DMA transfer direction - from memory to memory.
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+#define RTE_DMA_MEM_TO_DEV	(1ull << 1)
+/**< DMA transfer direction - from memory to device.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC
+ * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
+ * mode and can initiate a DMA move request from memory (which is ARM memory)
+ * to device (which is x86 host memory).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+#define RTE_DMA_DEV_TO_MEM	(1ull << 2)
+/**< DMA transfer direction - from device to memory.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC
+ * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
+ * mode and can initiate a DMA move request from device (which is x86 host
+ * memory) to memory (which is ARM memory).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+#define RTE_DMA_DEV_TO_DEV	(1ull << 3)
+/**< DMA transfer direction - from device to device.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC
+ * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
+ * mode and can initiate a DMA move request from device (which is x86 host
+ * memory) to device (which is another x86 host memory).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+#define RTE_DMA_TRANSFER_DIR_ALL	(RTE_DMA_MEM_TO_MEM | \
+					 RTE_DMA_MEM_TO_DEV | \
+					 RTE_DMA_DEV_TO_MEM | \
+					 RTE_DMA_DEV_TO_DEV)
+
+/**
+ * enum rte_dmadev_port_type - DMA port type defines.
+ */
+enum rte_dmadev_port_type {
+	/** The device port type is PCIE. */
+	RTE_DMADEV_PORT_OF_PCIE = 1,
+};
+
+/**
+ * A structure used to describe DMA port parameters.
+ */
+struct rte_dmadev_port_parameters {
+	enum rte_dmadev_port_type port_type;
+	union {
+		/** For PCIE port
+		 *
+		 * The following model shows how the SoC's PCIE module connects
+		 * to multiple PCIE hosts and multiple endpoints. The PCIE
+		 * module has an integrated DMA controller.
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * System Bus
+		 *    |     ----------PCIE module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIE Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIE Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIE Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * The following structure is used to describe the above access
+		 * port.
+		 */
+		struct {
+			uint64_t coreid : 3; /**< PCIE core id used */
+			uint64_t pfid : 6; /**< PF id used */
+			uint64_t vfen : 1; /**< VF enable bit */
+			uint64_t vfid : 8; /**< VF id used */
+			/** The PASID field in the TLP packet */
+			uint64_t pasid : 20;
+			/** The attributes field in the TLP packet */
+			uint64_t attr : 3;
+			/** The processing hint field in the TLP packet */
+			uint64_t ph : 2;
+			/** The steering tag field in the TLP packet */
+			uint64_t st : 16;
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	uint8_t direction;
+	/**< Set of supported transfer directions
+	 * @see RTE_DMA_MEM_TO_MEM
+	 * @see RTE_DMA_MEM_TO_DEV
+	 * @see RTE_DMA_DEV_TO_MEM
+	 * @see RTE_DMA_DEV_TO_DEV
+	 */
+	/** Number of descriptors for the virtual DMA channel */
+	uint16_t nb_desc;
+	/** 1) Used to describe the port parameter in the device-to-memory
+	 * transfer scenario.
+	 * 2) Used to describe the source port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_parameters
+	 */
+	struct rte_dmadev_port_parameters src_port;
+	/** 1) Used to describe the port parameter in the memory-to-device
+	 * transfer scenario.
+	 * 2) Used to describe the destination port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_parameters
+	 */
+	struct rte_dmadev_port_parameters dst_port;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation successful; the value is the virtual DMA channel id,
+ *          which is less than the field 'max_vchans' of struct rte_dmadev_conf
+ *          configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Release a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel returned by vchan setup.
+ *
+ * @return
+ *   - =0: Successfully release the virtual DMA channel.
+ *   - <0: Error code returned by the driver virtual channel release function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	/** Count of operations which were successfully enqueued */
+	uint64_t enqueued_count;
+	/** Count of operations which were submitted to hardware */
+	uint64_t submitted_count;
+	/** Count of operations which failed to complete */
+	uint64_t completed_fail_count;
+	/** Count of operations which successfully complete */
+	uint64_t completed_count;
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_sge - holds a scatter-gather DMA operation request entry.
+ */
+struct rte_dma_sge {
+	rte_iova_t addr;
+	uint32_t length;
+};
+
+/**
+ * rte_dma_sg - holds a scatter-gather DMA operation request.
+ */
+struct rte_dma_sg {
+	struct rte_dma_sge *src;
+	struct rte_dma_sge *dst;
+	uint16_t nb_src; /**< The number of src entries */
+	uint16_t nb_dst; /**< The number of dst entries */
+};
+
+#include "rte_dmadev_core.h"
+
+/**
+ *  DMA flags to augment operation preparation.
+ *  Used as the 'flags' parameter of rte_dmadev_copy/copy_sg/fill().
+ */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag
+ * It means that after the operation with this flag is enqueued, the doorbell
+ * is issued to hardware for the enqueued jobs.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware, but does not
+ * trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware,
+ * but does not trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param sg
+ *   The pointer of scatterlist.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    sg == NULL)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+	return (*dev->copy_sg)(dev, vchan, sg, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware, but does not
+ * trigger hardware to begin that operation.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/copy_sg/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if any transfer error occurred.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0)
+		return 0; /* uint16_t return type cannot carry -EINVAL */
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, -ENOTSUP);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * DMA transfer status code defines
+ */
+enum rte_dma_status_code {
+	/** The operation completed successfully */
+	RTE_DMA_STATUS_SUCCESSFUL = 0,
+	/** The operation failed to complete due to an active drop.
+	 * This is mainly used when processing dev_stop, allowing outstanding
+	 * requests to be completed as much as possible.
+	 */
+	RTE_DMA_STATUS_ACTIVE_DROP,
+	/** The operation failed to complete due to an invalid source address */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/** The operation failed to complete due to an invalid destination address */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/** The operation failed to complete due to an invalid length */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/** The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/** The operation failed to complete due to a bus error */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/** The operation failed to complete due to data poison */
+	RTE_DMA_STATUS_DATA_POISON,
+	/** The operation failed to complete due to a descriptor read error */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/** The operation failed to complete due to a device link error.
+	 * Indicates a link error in the mem-to-dev/dev-to-mem/
+	 * dev-to-dev transfer scenario.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/** The operation failed to complete due to an unknown reason */
+	RTE_DMA_STATUS_UNKNOWN,
+	/** Driver specific status code offset
+	 * Start status code for the driver to define its own error code.
+	 */
+	RTE_DMA_STATUS_DRV_SPECIFIC_OFFSET = 0x10000,
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that failed to complete.
+ * NOTE: This API should be used when the rte_dmadev_completed() has_error output is set.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_status
+ *   Indicates the size of status array.
+ * @param[out] status
+ *   The error codes of operations that failed to complete.
+ *   Some standard error codes are described in 'enum rte_dma_status_code'
+ *   @see rte_dma_status_code
+ * @param[out] last_idx
+ *   The last failed completed operation's index.
+ *
+ * @return
+ *   The number of operations that failed to complete.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
+			   const uint16_t nb_status, uint32_t *status,
+			   uint16_t *last_idx)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_status == 0 ||
+	    status == NULL ||
+	    last_idx == NULL)
+		return 0; /* uint16_t return type cannot carry -EINVAL */
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_fails, -ENOTSUP);
+#endif
+	return (*dev->completed_fails)(dev, vchan, nb_status, status, last_idx);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
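
For illustration only (not part of the patch): a minimal datapath sketch using
the inline functions above, assuming dev_id/vchan were set up via
configure/vchan_setup/start and that src/dst/len are valid addresses for the
device:

	/* editorial sketch: enqueue one copy, ring the doorbell, poll for it */
	static int
	copy_one(uint16_t dev_id, uint16_t vchan, rte_iova_t src,
		 rte_iova_t dst, uint32_t len)
	{
		uint16_t last_idx;
		bool has_error;
		int idx;

		idx = rte_dmadev_copy(dev_id, vchan, src, dst, len,
				      RTE_DMA_OP_FLAG_SUBMIT);
		if (idx < 0)
			return idx;
		/* busy-poll for the single completion; real code would bound this */
		while (rte_dmadev_completed(dev_id, vchan, 1, &last_idx,
					    &has_error) == 0)
			;
		return has_error ? -1 : 0;
	}
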
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..b0b6494
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*dmadev_info_get_t)(const struct rte_dmadev *dev,
+				 struct rte_dmadev_info *dev_info,
+				 uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
+				  const struct rte_dmadev_conf *dev_conf);
+
+/** @internal Used to start a configured device. */
+typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				    const struct rte_dmadev_vchan_conf *conf);
+
+/** @internal Used to release a virtual DMA channel. */
+typedef int (*dmadev_vchan_release_t)(struct rte_dmadev *dev, uint16_t vchan);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*dmadev_stats_get_t)(const struct rte_dmadev *dev, uint16_t vchan,
+				  struct rte_dmadev_stats *stats,
+				  uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+
+/** @internal Used to start dmadev selftest. */
+typedef int (*dmadev_selftest_t)(uint16_t dev_id);
+
+/** @internal Used to enqueue a copy operation. */
+typedef int (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+			     rte_iova_t src, rte_iova_t dst,
+			     uint32_t length, uint64_t flags);
+
+/** @internal Used to enqueue a scatter list copy operation. */
+typedef int (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dma_sg *sg, uint64_t flags);
+
+/** @internal Used to enqueue a fill operation. */
+typedef int (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+			     uint64_t pattern, rte_iova_t dst,
+			     uint32_t length, uint64_t flags);
+
+/** @internal Used to trigger hardware to begin working. */
+typedef int (*dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+
+/** @internal Used to return number of successfully completed operations. */
+typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vchan,
+				       const uint16_t nb_cpls,
+				       uint16_t *last_idx, bool *has_error);
+
+/** @internal Used to return number of operations that failed to complete. */
+typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_status,
+			uint32_t *status, uint16_t *last_idx);
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	/** Device is unused before being probed. */
+	RTE_DMADEV_UNUSED = 0,
+	/** Device is attached when allocated in probing. */
+	RTE_DMADEV_ATTACHED,
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	dmadev_info_get_t dev_info_get;
+	dmadev_configure_t dev_configure;
+	dmadev_start_t dev_start;
+	dmadev_stop_t dev_stop;
+	dmadev_close_t dev_close;
+	dmadev_vchan_setup_t vchan_setup;
+	dmadev_vchan_release_t vchan_release;
+	dmadev_stats_get_t stats_get;
+	dmadev_stats_reset_t stats_reset;
+	dmadev_dump_t dev_dump;
+	dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private; /**< PMD-specific private data. */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ */
+struct rte_dmadev {
+	dmadev_copy_t copy;
+	dmadev_copy_sg_t copy_sg;
+	dmadev_fill_t fill;
+	dmadev_submit_t submit;
+	dmadev_completed_t completed;
+	dmadev_completed_fails_t completed_fails;
+	void *reserved_ptr; /**< Reserved for future IO function */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	/** Device info supplied during device initialization. */
+	struct rte_device *device;
+	enum rte_dmadev_state state; /**< Flag indicating the device state */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
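
For illustration only (not part of the patch): how a driver might wire up the
structures above at init time. All skel_* handlers are hypothetical functions
assumed to match the typedefs defined in this header:

	/* editorial sketch of hypothetical PMD glue */
	static const struct rte_dmadev_ops skel_ops = {
		.dev_info_get  = skel_info_get,
		.dev_configure = skel_configure,
		.dev_start     = skel_start,
		.dev_stop      = skel_stop,
		.dev_close     = skel_close,
		.vchan_setup   = skel_vchan_setup,
	};

	static void
	skel_init(struct rte_dmadev *dev)
	{
		dev->dev_ops = &skel_ops;
		/* fast-path pointers live directly in struct rte_dmadev */
		dev->copy      = skel_copy;
		dev->submit    = skel_submit;
		dev->completed = skel_completed;
	}
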
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
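
For illustration only (not part of the patch): a hypothetical probe path using
the allocator above; the device name and the bus glue are placeholders:

	/* editorial sketch of a hypothetical driver probe function */
	static int
	skel_probe(struct rte_device *rte_dev)
	{
		struct rte_dmadev *dev = rte_dmadev_pmd_allocate("dma_skeleton");

		if (dev == NULL)
			return -ENOMEM;
		dev->device = rte_dev;
		/* ... set dev->dev_ops and the fast-path pointers here ... */
		return 0;
	}
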
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..2af78e4
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,37 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_fails;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_release;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+	rte_dmadevices;
+
+	local: *;
+};
+
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-13 12:27 ` [dpdk-dev] [PATCH v3] " Chengwen Feng
@ 2021-07-13 13:06   ` fengchengwen
  2021-07-13 13:37     ` Bruce Richardson
  2021-07-13 16:02   ` Bruce Richardson
                     ` (3 subsequent siblings)
  4 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-13 13:06 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

Thank you for your valuable comments, and I think we've taken a big step forward.

@andrew Could you provide the copyright line so that I can add it to relevant file.

@bruce, jerin  Some unmodified review comments are returned here:

1.
COMMENT: We allow up to 100 characters per line for DPDK code, so these don't need
to be wrapped so aggressively.

REPLY: Our CI still has an 80-character limit, and I see most frameworks still comply.

2.
COMMENT: > +#define RTE_DMA_MEM_TO_MEM     (1ull << 0)
RTE_DMA_DIRECTION_...

REPLY: adding 'DIRECTION' may make the macro too long; I prefer to keep it simple.

3.
COMMENT: > +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
We are not making release a public API in other device classes. See ethdev spec.
bbdev/eventdev/rawdev

REPLY: because ethdev's queues are hard queues, while here we have software-defined
channels, I think release is OK. BTW: bbdev/eventdev also have release ops.

4.
COMMENT:> +       uint64_t reserved[4]; /**< Reserved for future fields */
> +};
Please add the capability for each counter in info structure as one
device may support all
the counters.

REPLY: This is a statistics function. If this function is not supported, then there is
no need to implement the stats ops function. Alternatively, the unimplemented counters
could be set to zero.

5.
COMMENT: > +#endif
> +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
Instead of every driver set the NOP function, In the common code, If
the CAPA is not set,
common code can set NOP function for this with <0 return value.

REPLY: I don't think it's a good idea to make that judgement in the I/O path; it's the
application's duty to ensure it doesn't call APIs the driver doesn't support (which it
can learn from the capabilities).

6.
COMMENT: > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
> +                          const uint16_t nb_status, uint32_t *status,
uint32_t -> enum rte_dma_status_code

REPLY: I'm still evaluating this. It takes a long time for the driver to perform error code
conversion in this API. Do we need to provide a separate error code conversion function?

7.
COMMENT: > +typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
> +                                struct rte_dmadev_info *dev_info);
Please change to rte_dmadev_info_get_t to avoid conflict due to namespace issue
as this header is exported.

REPLY: I prefer not to add the 'rte_' prefix; it makes the define too long.

8.
COMMENT: > + *        - rte_dmadev_completed_fails()
> + *            - return the number of operation requests failed to complete.
Please rename this to "completed_status" to allow the return of information
other than just errors. As I suggested before, I think this should also be
usable as a slower version of "completed" even in the case where there are
no errors, in that it returns status information for each and every job
rather than just returning as soon as it hits a failure.

REPLY: well, I think it may confuse (the current OK/FAIL API is easy to understand),
and we can build the slow path function on top of the two APIs.

9.
COMMENT: > +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
> +/**< DMA device support mem-to-mem transfer.
Do we need this? Can we assume that any device appearing as a dmadev can
do mem-to-mem copies, and drop the capability for mem-to-mem and the
capability for copying?
also for RTE_DMA_DEV_CAPA_OPS_COPY

REPLY: yes, I insist on adding this for the sake of conceptual integrity.
For the ioat driver it just needs to declare the capability.

10.
COMMENT: > +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured */
> +};
Let's add rte_dmadev_conf struct into this to return the configuration
settings.

REPLY: If we add rte_dmadev_conf in, it may break ABI when rte_dmadev_conf adds fields.


[snip]

On 2021/7/13 20:27, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
> v3:
> * rm reset and fill_sg ops.
> * rm MT-safe capabilities.
> * add submit flag.
> * redefine rte_dma_sg to implement asymmetric copy.
> * delete some reserved field for future use.
> * rearrangement rte_dmadev/rte_dmadev_data struct.
> * refresh rte_dmadev.h copyright.
> * update vchan setup parameter.
> * modified some inappropriate descriptions.
> * arrange version.map alphabetically.
> * other minor modifications from review comment.
> ---
>  MAINTAINERS                  |   4 +
>  config/rte_config.h          |   3 +
>  lib/dmadev/meson.build       |   7 +
>  lib/dmadev/rte_dmadev.c      | 561 +++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h      | 968 +++++++++++++++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h | 161 +++++++
>  lib/dmadev/rte_dmadev_pmd.h  |  72 ++++
>  lib/dmadev/version.map       |  37 ++
>  lib/meson.build              |   1 +


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-13 13:06   ` fengchengwen
@ 2021-07-13 13:37     ` Bruce Richardson
  2021-07-15  6:44       ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-13 13:37 UTC (permalink / raw)
  To: fengchengwen
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Tue, Jul 13, 2021 at 09:06:39PM +0800, fengchengwen wrote:
> Thank you for your valuable comments, and I think we've taken a big step forward.
> 
> @andrew Could you provide the copyright line so that I can add it to relevant file.
> 
> @bruce, jerin  Some unmodified review comments are returned here:

Thanks. Some further comments inline below. Most points you make I'm ok
with, but I do disagree on a number of others.

/Bruce

> 
> 1.
> COMMENT: We allow up to 100 characters per line for DPDK code, so these don't need
> to be wrapped so aggressively.
> 
> REPLY: Our CI still has an 80-character limit, and I see most frameworks still comply.
> 
Ok.

> 2.
> COMMENT: > +#define RTE_DMA_MEM_TO_MEM     (1ull << 0)
> RTE_DMA_DIRECTION_...
> 
> REPLY: adding 'DIRECTION' may make the macro too long; I prefer to keep it simple.
> 
DIRECTION could be shortened to DIR, but I think this is probably ok as is
too.

> 3.
> COMMENT: > +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
> We are not making release a public API in other device classes. See ethdev spec.
> bbdev/eventdev/rawdev
> 
> REPLY: because ethdev's queues are hard queues, while here we have software-defined
> channels, I think release is OK. BTW: bbdev/eventdev also have release ops.
> 
Ok

> 4.  COMMENT:> +       uint64_t reserved[4]; /**< Reserved for future
> fields */
> > +};
> Please add the capability for each counter in info structure as one
> device may support all the counters.
> 
> REPLY: This is a statistics function. If this function is not supported,
> then there is no need to implement the stats ops function. Alternatively,
> the unimplemented counters could be set to zero.
> 
+1
The stats functions should be a minimum set that is supported by all
drivers. Each of these stats can be easily tracked by software if HW
support for it is not available, so I agree that we should not have each
stat as a capability.
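
To illustrate the point (sketch only, hypothetical driver-side code): a driver
without HW counters can bump the generic counters in its own enqueue and
completion paths, e.g.:

	struct skel_vchan {
		struct rte_dmadev_stats stats;
		/* ... ring state ... */
	};

	/* called from the driver's copy/fill enqueue handlers */
	static inline void
	skel_count_enqueue(struct skel_vchan *vc, int rc)
	{
		if (rc >= 0)
			vc->stats.enqueued_count++;
	}
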

> 5.
> COMMENT: > +#endif
> > +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> Instead of every driver set the NOP function, In the common code, If
> the CAPA is not set,
> common code can set NOP function for this with <0 return value.
> 
> REPLY: I don't think it's a good idea to make that judgement in the I/O path; it's the
> application's duty to ensure it doesn't call APIs the driver doesn't support (which it
> can learn from the capabilities).
> 
For datapath functions, +1.

> 6.
> COMMENT: > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
> > +                          const uint16_t nb_status, uint32_t *status,
> uint32_t -> enum rte_dma_status_code
> 
> REPLY: I'm still evaluating this. It takes a long time for the driver to perform error code
> conversion in this API. Do we need to provide a separate error code conversion function?
> 
It's not that difficult a conversion to do, and so long as we have the
regular "completed" function which doesn't do all the error manipulation we
should be fine. Performance in the case of errors is not expected to be as
good, since errors should be very rare.
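
As a sketch of the conversion being discussed (the HW codes are hypothetical,
the generic codes are from the v3 patch), a simple lookup table keeps the cost
off the fast path:

	static const enum rte_dma_status_code skel_err_map[] = {
		[0] = RTE_DMA_STATUS_SUCCESSFUL,
		[1] = RTE_DMA_STATUS_INVALID_SRC_ADDR,
		[2] = RTE_DMA_STATUS_INVALID_DST_ADDR,
		[3] = RTE_DMA_STATUS_BUS_ERROR,
	};

	static inline enum rte_dma_status_code
	skel_xlate_status(unsigned int hw_code)
	{
		if (hw_code >= RTE_DIM(skel_err_map))
			return RTE_DMA_STATUS_UNKNOWN;
		return skel_err_map[hw_code];
	}
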

> 7.
> COMMENT: > +typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
> > +                                struct rte_dmadev_info *dev_info);
> Please change to rte_dmadev_info_get_t to avoid conflict due to namespace issue
> as this header is exported.
> 
> REPLY: I prefer not to add the 'rte_' prefix; it makes the define too long.
> 
I disagree on this, they need the rte_ prefix, despite the fact it makes
them longer. If length is a concern, these can be changed from "dmadev_" to
"rte_dma_", which is only one character longer.
In fact, I believe Morten already suggested we use "rte_dma" rather than
"rte_dmadev" as a function prefix across the library.

> 8.
> COMMENT: > + *        - rte_dmadev_completed_fails()
> > + *            - return the number of operation requests failed to complete.
> Please rename this to "completed_status" to allow the return of information
> other than just errors. As I suggested before, I think this should also be
> usable as a slower version of "completed" even in the case where there are
> no errors, in that it returns status information for each and every job
> rather than just returning as soon as it hits a failure.
> 
> REPLY: well, I think it may confuse (the current OK/FAIL API is easy to understand),
> and we can build the slow path function on top of the two APIs.
> 
I still disagree on this too. We have a "completed" op where we get
informed of what has completed and minimal error indication, and a
"completed_status" operation which provides status information for each
operation completed, at the cost of speed.
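
To make the split concrete, an application-side sketch (API names as in the v3
patch; the batch size of 64 is a placeholder):

	static void
	reap(uint16_t dev_id, uint16_t vchan)
	{
		uint16_t last_idx, n;
		bool has_error = false;
		uint32_t status[64];

		n = rte_dmadev_completed(dev_id, vchan, 64, &last_idx, &has_error);
		/* ... retire the n successfully completed jobs ... */
		if (has_error) {
			/* slower path: per-job status for the failed requests */
			n = rte_dmadev_completed_fails(dev_id, vchan, 64,
						       status, &last_idx);
			/* ... handle status[0..n-1], see enum rte_dma_status_code ... */
		}
	}
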

> 9.
> COMMENT: > +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
> > +/**< DMA device support mem-to-mem transfer.
> Do we need this? Can we assume that any device appearing as a dmadev can
> do mem-to-mem copies, and drop the capability for mem-to-mem and the
> capability for copying?
> also for RTE_DMA_DEV_CAPA_OPS_COPY
> 
> REPLY: yes, I insist on adding this for the sake of conceptual integrity.
> For the ioat driver it just needs to declare the capability.
> 

Ok. It seems a wasted bit to me, but I don't see us running out of them
soon.

> 10.
> COMMENT: > +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured */
> > +};
> Let's add rte_dmadev_conf struct into this to return the configuration
> settings.
> 
> REPLY: If we add rte_dmadev_conf in, it may break ABI when rte_dmadev_conf adds fields.
> 
Yes, that is true, but I fail to see why that is a major problem. It just
means that if the conf structure changes we have two functions to version
instead of one. The information is still useful.

If you don't want the actual conf structure explicitly put into the info
struct, we can instead put the fields in directly. I really think that the
info_get function should provide back to the user the details of what way
the device was configured previously.

regards,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-07-12 15:50   ` Bruce Richardson
@ 2021-07-13 14:19   ` Ananyev, Konstantin
  2021-07-13 14:28     ` Bruce Richardson
  6 siblings, 1 reply; 339+ messages in thread
From: Ananyev, Konstantin @ 2021-07-13 14:19 UTC (permalink / raw)
  To: Chengwen Feng, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, liangma


> +#include "rte_dmadev_core.h"
> +
> +/**
> + *  DMA flags to augment operation preparation.
> + *  Used as the 'flags' parameter of rte_dmadev_copy/copy_sg/fill/fill_sg.
> + */
> +#define RTE_DMA_FLAG_FENCE	(1ull << 0)
> +/**< DMA fence flag
> + * It means the operation with this flag must be processed only after all
> + * previous operations are completed.
> + *
> + * @see rte_dmadev_copy()
> + * @see rte_dmadev_copy_sg()
> + * @see rte_dmadev_fill()
> + * @see rte_dmadev_fill_sg()
> + */
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the virtual DMA channel.
> + *
> + * This queues up a copy operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   An flags for this operation.
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> +		uint32_t length, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];

One question I have - did you guys consider hiding the definitions of
struct rte_dmadev and rte_dmadevices[] in the .c file straight from the start?
Probably no point to repeat our famous ABI ethdev/cryptodev/... pitfalls here.

> +#ifdef RTE_DMADEV_DEBUG
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR, "Invalid vchan %d\n", vchan);
> +		return -EINVAL;
> +	}
> +#endif
> +	return (*dev->copy)(dev, vchan, src, dst, length, flags);
> +}
> +
> +/**

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library
  2021-07-13 14:19   ` Ananyev, Konstantin
@ 2021-07-13 14:28     ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-13 14:28 UTC (permalink / raw)
  To: Ananyev, Konstantin
  Cc: Chengwen Feng, thomas, Yigit, Ferruh, jerinj, jerinjacobk, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, liangma

On Tue, Jul 13, 2021 at 03:19:39PM +0100, Ananyev, Konstantin wrote:
> 
> > +#include "rte_dmadev_core.h"
> > +
> > +/**
> > + *  DMA flags to augment operation preparation.
> > + *  Used as the 'flags' parameter of rte_dmadev_copy/copy_sg/fill/fill_sg.
> > + */
> > +#define RTE_DMA_FLAG_FENCE   (1ull << 0)
> > +/**< DMA fence flag
> > + * It means the operation with this flag must be processed only after all
> > + * previous operations are completed.
> > + *
> > + * @see rte_dmadev_copy()
> > + * @see rte_dmadev_copy_sg()
> > + * @see rte_dmadev_fill()
> > + * @see rte_dmadev_fill_sg()
> > + */
> > +
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Enqueue a copy operation onto the virtual DMA channel.
> > + *
> > + * This queues up a copy operation to be performed by hardware, but does not
> > + * trigger hardware to begin that operation.
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + * @param src
> > + *   The address of the source buffer.
> > + * @param dst
> > + *   The address of the destination buffer.
> > + * @param length
> > + *   The length of the data to be copied.
> > + * @param flags
> > + *   The flags for this operation.
> > + *
> > + * @return
> > + *   - 0..UINT16_MAX: index of enqueued copy job.
> > + *   - <0: Error code returned by the driver copy function.
> > + */
> > +__rte_experimental
> > +static inline int
> > +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> > +             uint32_t length, uint64_t flags)
> > +{
> > +     struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> 
> One question I have - did you guys consider hiding the definitions of
> struct rte_dmadev and rte_dmadevices[] in the .c file straight from the start?
> Probably no point to repeat our famous ABI ethdev/cryptodev/... pitfalls here.
> 
I considered it, but I found even moving one operation (the doorbell one)
to be non-inline made a small but noticeable perf drop. Until we get all the
drivers done and more testing in various scenarios, I'd rather err on the
side of getting the best performance.
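
For reference, here is the difference being discussed, as a cut-down
sketch with illustrative stand-in types (not the patch code):

#include <stdint.h>

struct dev {
	int (*copy)(struct dev *d, uint16_t vchan);
};
extern struct dev devices[];

/* (a) device array exposed in a public header: the wrapper is a
 * static inline, so each call compiles to a single indirect call
 * through d->copy. */
static inline int
copy_inline(uint16_t dev_id, uint16_t vchan)
{
	struct dev *d = &devices[dev_id];
	return d->copy(d, vchan);
}

/* (b) device array hidden in the library's .c file: the app must go
 * through a real exported symbol first, so every operation pays an
 * extra non-inlinable call - the doorbell-path cost mentioned above. */
int copy_exported(uint16_t dev_id, uint16_t vchan);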

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-13 12:27 ` [dpdk-dev] [PATCH v3] " Chengwen Feng
  2021-07-13 13:06   ` fengchengwen
@ 2021-07-13 16:02   ` Bruce Richardson
  2021-07-14 12:22   ` Nipun Gupta
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-13 16:02 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Tue, Jul 13, 2021 at 08:27:43PM +0800, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
> v3:
> * rm reset and fill_sg ops.
> * rm MT-safe capabilities.
> * add submit flag.
> * redefine rte_dma_sg to implement asymmetric copy.
> * delete some reserved field for future use.
> * rearrangement rte_dmadev/rte_dmadev_data struct.
> * refresh rte_dmadev.h copyright.
> * update vchan setup parameter.
> * modified some inappropriate descriptions.
> * arrange version.map alphabetically.
> * other minor modifications from review comment.
> ---

Thanks, some further comments inline below on the .c file initially.

/Bruce

>  MAINTAINERS                  |   4 +
>  config/rte_config.h          |   3 +
>  lib/dmadev/meson.build       |   7 +
>  lib/dmadev/rte_dmadev.c      | 561 +++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h      | 968 +++++++++++++++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h | 161 +++++++
>  lib/dmadev/rte_dmadev_pmd.h  |  72 ++++
>  lib/dmadev/version.map       |  37 ++
>  lib/meson.build              |   1 +
>  9 files changed, 1814 insertions(+)
>  create mode 100644 lib/dmadev/meson.build
>  create mode 100644 lib/dmadev/rte_dmadev.c
>  create mode 100644 lib/dmadev/rte_dmadev.h
>  create mode 100644 lib/dmadev/rte_dmadev_core.h
>  create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>  create mode 100644 lib/dmadev/version.map
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index af2a91d..e01a07f 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
>  F: app/test/test_rawdev.c
>  F: doc/guides/prog_guide/rawdev.rst
>  
> +DMA device API - EXPERIMENTAL
> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/
> +
>  
>  Memory Pool Drivers
>  -------------------
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 590903c..331a431 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -81,6 +81,9 @@
>  /* rawdev defines */
>  #define RTE_RAWDEV_MAX_DEVS 64
>  
> +/* dmadev defines */
> +#define RTE_DMADEV_MAX_DEVS 64
> +
>  /* ip_fragmentation defines */
>  #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
>  #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
> diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
> new file mode 100644
> index 0000000..d2fc85e
> --- /dev/null
> +++ b/lib/dmadev/meson.build
> @@ -0,0 +1,7 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2021 HiSilicon Limited.
> +
> +sources = files('rte_dmadev.c')
> +headers = files('rte_dmadev.h')
> +indirect_headers += files('rte_dmadev_core.h')
> +driver_sdk_headers += files('rte_dmadev_pmd.h')
> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> new file mode 100644
> index 0000000..1bca463
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -0,0 +1,561 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#include <ctype.h>
> +#include <inttypes.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_eal.h>
> +#include <rte_errno.h>
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +#include <rte_memory.h>
> +#include <rte_memzone.h>
> +#include <rte_malloc.h>
> +#include <rte_string_fns.h>
> +
> +#include "rte_dmadev.h"
> +#include "rte_dmadev_pmd.h"
> +
> +struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
> +
> +static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
> +/* Shared memory between primary and secondary processes. */
> +static struct {
> +	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
> +} *dmadev_shared_data;
> +
> +RTE_LOG_REGISTER(rte_dmadev_logtype, lib.dmadev, INFO);

There is an RTE_LOG_REGISTER_DEFAULT macro which can be used here instead.
Also, since the logtype is not exposed outside this file, we can drop the
prefix on it to shorten it:

"RTE_LOG_REGISTER_DEFAULT(logtype, INFO);"

> +#define RTE_DMADEV_LOG(level, ...) \
> +	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
> +
> +/* Macros to check for valid device id */
> +#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
> +	if (!rte_dmadev_is_valid_dev(dev_id)) { \
> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> +		return retval; \
> +	} \
> +} while (0)
> +
> +#define RTE_DMADEV_VALID_DEV_ID_OR_RET(dev_id) do { \
> +	if (!rte_dmadev_is_valid_dev(dev_id)) { \
> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> +		return; \
> +	} \
> +} while (0)
> +
Looking through the code, this macro appears unused, since all functions
return values.
The former "RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET" can also be shorted to
remove prefixes, because it's again local to the file. Suggest:
"VALID_DEV_ID_OR_ERR"

> +/* Macro to check for invalid pointers */
> +#define RTE_DMADEV_PTR_OR_ERR_RET(ptr, retval) do { \
> +	if ((ptr) == NULL) \
> +		return retval; \
> +} while (0)
> +
This is a very short macro, so in practice it's only saving one line of
code. Also, with current use, the "retval" is always -EINVAL. I'd tend
towards dropping the macro, but if we want one, I'd suggest a short
one-line one:

"#define CHECK_PTR_PARAM(ptr) if ((ptr) == NULL) return -EINVAL"

However, overall I don't think it's worth it - case in point, see the check
for "name" below which skips using the macro anyway.

> +static int
> +dmadev_check_name(const char *name)
> +{
> +	size_t name_len;
> +
> +	if (name == NULL) {
> +		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
> +		return -EINVAL;
> +	}
> +
> +	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
> +	if (name_len == 0) {
> +		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
> +		return -EINVAL;
> +	}
> +	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
> +		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static uint16_t
> +dmadev_find_free_dev(void)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
> +			RTE_ASSERT(rte_dmadevices[i].state ==
> +				   RTE_DMADEV_UNUSED);
> +			return i;
> +		}
> +	}
> +
> +	return RTE_DMADEV_MAX_DEVS;
> +}
> +
> +static struct rte_dmadev*
> +dmadev_find(const char *name)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
> +		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
> +			return &rte_dmadevices[i];
> +	}
> +
> +	return NULL;
> +}
> +
> +static int
> +dmadev_shared_data_prepare(void)
> +{
> +	const struct rte_memzone *mz;
> +
> +	if (dmadev_shared_data == NULL) {
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +			/* Allocate port data and ownership shared memory. */
> +			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
> +					 sizeof(*dmadev_shared_data),
> +					 rte_socket_id(), 0);
> +		} else {
> +			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
> +		}

Minor nit, our coding style for DPDK says to omit the braces around
single-statement legs like this.

> +		if (mz == NULL)
> +			return -ENOMEM;
> +
> +		dmadev_shared_data = mz->addr;
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +			memset(dmadev_shared_data->data, 0,
> +			       sizeof(dmadev_shared_data->data));

I believe all memzones are zero on allocation anyway, so this memset is
unnecessary and can be dropped.

> +	}
> +
> +	return 0;
> +}
> +
> +static struct rte_dmadev *
> +dmadev_allocate(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +	uint16_t dev_id;
> +
> +	dev = dmadev_find(name);
> +	if (dev != NULL) {
> +		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
> +		return NULL;
> +	}
> +
> +	dev_id = dmadev_find_free_dev();
> +	if (dev_id == RTE_DMADEV_MAX_DEVS) {
> +		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
> +		return NULL;
> +	}
> +
> +	if (dmadev_shared_data_prepare() != 0) {
> +		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +		return NULL;
> +	}
> +
> +	dev = &rte_dmadevices[dev_id];
> +	dev->data = &dmadev_shared_data->data[dev_id];
> +	dev->data->dev_id = dev_id;
> +	strlcpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
> +
> +	return dev;
> +}
> +
> +static struct rte_dmadev *
> +dmadev_attach_secondary(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +	uint16_t i;
> +
> +	if (dmadev_shared_data_prepare() != 0) {
> +		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +		return NULL;
> +	}
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
> +			break;
> +	}
> +	if (i == RTE_DMADEV_MAX_DEVS) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %s is not driven by the primary process\n",
> +			name);
> +		return NULL;
> +	}
> +
> +	dev = &rte_dmadevices[i];
> +	dev->data = &dmadev_shared_data->data[i];
> +	RTE_ASSERT(dev->data->dev_id == i);
> +
> +	return dev;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +
> +	if (dmadev_check_name(name) != 0)
> +		return NULL;
> +
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +		dev = dmadev_allocate(name);
> +	else
> +		dev = dmadev_attach_secondary(name);
> +
> +	if (dev == NULL)
> +		return NULL;
> +	dev->state = RTE_DMADEV_ATTACHED;
> +
> +	return dev;
> +}
> +
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev)
> +{
> +	if (dev == NULL)
> +		return -EINVAL;
> +
> +	if (dev->state == RTE_DMADEV_UNUSED)
> +		return 0;
> +
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +		rte_free(dev->data->dev_private);

There seems to be an imbalance here. If we "free" on release, we should
similarly "malloc" on allocate, otherwise we run the risk of dev_private
being allocated using regular malloc in a driver, for example. I think some
other allocation APIs take the private data size to reserve as a parameter,
and we can follow that model.
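
As a rough sketch of that model (hypothetical signature; the
secondary-process path and cleanup on failure are omitted):

struct rte_dmadev *
rte_dmadev_pmd_allocate(const char *name, size_t private_data_size)
{
	struct rte_dmadev *dev = dmadev_allocate(name);

	if (dev == NULL)
		return NULL;
	/* reserve the driver-private area here, so the rte_free() in
	 * release is balanced by an rte_malloc-family allocation */
	if (private_data_size > 0) {
		dev->data->dev_private = rte_zmalloc_socket(name,
				private_data_size, RTE_CACHE_LINE_SIZE,
				rte_socket_id());
		if (dev->data->dev_private == NULL)
			return NULL;
	}
	dev->state = RTE_DMADEV_ATTACHED;
	return dev;
}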

> +		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
> +	}
> +
> +	memset(dev, 0, sizeof(struct rte_dmadev));
> +	dev->state = RTE_DMADEV_UNUSED;
> +
> +	return 0;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_get_device_by_name(const char *name)
> +{
> +	if (dmadev_check_name(name) != 0)
> +		return NULL;
> +	return dmadev_find(name);
> +}
> +
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id)
> +{
> +	if (dev_id >= RTE_DMADEV_MAX_DEVS ||
> +	    rte_dmadevices[dev_id].state != RTE_DMADEV_ATTACHED)
> +		return false;
> +	return true;
> +}

Can be a one-line function:
"return (dev_id < RTE_DMADEV_MAX_DEVS && 
		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED);"

> +
> +uint16_t
> +rte_dmadev_count(void)
> +{
> +	uint16_t count = 0;
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
> +			count++;
> +	}
> +
> +	return count;
> +}
> +
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
> +{
> +	const struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_DMADEV_PTR_OR_ERR_RET(dev_info, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];

This line can be merged into the definition of dev, since it's just
assigning an address and nothing references dev before that point.

> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
> +	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
> +	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
> +					    sizeof(struct rte_dmadev_info));
> +	if (ret != 0)
> +		return ret;
> +
> +	dev_info->device = dev->device;
> +	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
> +
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
> +{
> +	struct rte_dmadev_info info;
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_DMADEV_PTR_OR_ERR_RET(dev_conf, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];

As above, merge into definition line of dev as:
"struct rte_dmadev *dev = &rte_dmadevices[dev_id];"

> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (dev_conf->max_vchans > info.max_vchans) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u configure too many vchans\n", dev_id);
> +		return -EINVAL;
> +	}
> +
> +	if (dev->data->dev_started != 0) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u must be stopped to allow configuration\n",
> +			dev_id);
> +		return -EBUSY;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

Rather than putting in all these checks and returning -ENOTSUP, I'd like to
propose that we instead have the ops structure assigned as part of the
"rte_dmadev_pmd_allocate()" function. That then allows us to enforce that
each device supports the minimum set of functions, i.e. info_get,
configure, etc. etc.
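
A minimal sketch of what that enforcement could look like (helper name
hypothetical), run once when the PMD registers its ops rather than on
every call:

static int
dmadev_ops_complete(const struct rte_dmadev_ops *ops)
{
	/* mandatory callbacks every driver must provide */
	return ops != NULL &&
	       ops->dev_info_get != NULL &&
	       ops->dev_configure != NULL &&
	       ops->dev_start != NULL &&
	       ops->dev_stop != NULL &&
	       ops->dev_close != NULL;
}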

> +	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
> +	if (ret == 0)
> +		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
> +
> +	return ret;
> +}
> +
> +int
> +rte_dmadev_start(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (dev->data->dev_started != 0) {
> +		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
> +		return 0;
> +	}
> +
> +	if (dev->dev_ops->dev_start == NULL)
> +		goto mark_started;
> +
> +	ret = (*dev->dev_ops->dev_start)(dev);
> +	if (ret != 0)
> +		return ret;
> +
> +mark_started:
> +	dev->data->dev_started = 1;
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_stop(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (dev->data->dev_started == 0) {
> +		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
> +		return 0;
> +	}
> +
> +	if (dev->dev_ops->dev_stop == NULL)
> +		goto mark_stopped;
> +
> +	ret = (*dev->dev_ops->dev_stop)(dev);
> +	if (ret != 0)
> +		return ret;
> +
> +mark_stopped:
> +	dev->data->dev_started = 0;
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_close(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	/* Device must be stopped before it can be closed */
> +	if (dev->data->dev_started == 1) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u must be stopped before closing\n", dev_id);
> +		return -EBUSY;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
> +	return (*dev->dev_ops->dev_close)(dev);
> +}
> +
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +		       const struct rte_dmadev_vchan_conf *conf)
> +{
> +	struct rte_dmadev_info info;
> +	struct rte_dmadev *dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_DMADEV_PTR_OR_ERR_RET(conf, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction == 0 ||
> +	    conf->direction & ~RTE_DMA_TRANSFER_DIR_ALL) {
> +		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction & RTE_DMA_MEM_TO_MEM &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support mem2mem transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction & RTE_DMA_MEM_TO_DEV &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_DEV)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support mem2dev transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction & RTE_DMA_DEV_TO_MEM &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_MEM)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support dev2mem transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction & RTE_DMA_DEV_TO_DEV &&
> +	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_DEV)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support dev2dev transfer\n", dev_id);
> +		return -EINVAL;
> +	}

Rather than checking each one of these individually, can we just merge
these checks into one?
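
For example, something like this - a sketch which relies on the
direction flags and the corresponding capability bits sharing bit
positions, as they do in this version:

	if (conf->direction & ~(info.dev_capa & RTE_DMA_TRANSFER_DIR_ALL)) {
		RTE_DMADEV_LOG(ERR,
			"Device %u doesn't support all requested directions\n",
			dev_id);
		return -EINVAL;
	}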

> +	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u number of descriptors invalid\n", dev_id);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
> +	return (*dev->dev_ops->vchan_setup)(dev, conf);
> +}
> +
> +int
> +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (vchan >= dev->data->dev_conf.max_vchans) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range\n", dev_id, vchan);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_release, -ENOTSUP);
> +	return (*dev->dev_ops->vchan_release)(dev, vchan);
> +}
> +
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
> +		     struct rte_dmadev_stats *stats)
> +{
> +	const struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_DMADEV_PTR_OR_ERR_RET(stats, -EINVAL);
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (vchan >= dev->data->dev_conf.max_vchans &&
> +	    vchan != RTE_DMADEV_ALL_VCHAN) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range\n", dev_id, vchan);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
> +	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
> +					  sizeof(struct rte_dmadev_stats));
> +}
> +
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	if (vchan >= dev->data->dev_conf.max_vchans &&
> +	    vchan != RTE_DMADEV_ALL_VCHAN) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range\n", dev_id, vchan);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
> +	return (*dev->dev_ops->stats_reset)(dev, vchan);
> +}
> +
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f)
> +{
> +	const struct rte_dmadev *dev;
> +	struct rte_dmadev_info info;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_DMADEV_PTR_OR_ERR_RET(f, -EINVAL);
> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +
> +	dev = &rte_dmadevices[dev_id];
> +
> +	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
> +		dev->data->dev_id,
> +		dev->data->dev_name,
> +		dev->data->dev_started ? "started" : "stopped");
> +	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
> +	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
> +	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
> +
> +	if (dev->dev_ops->dev_dump != NULL)
> +		return (*dev->dev_ops->dev_dump)(dev, f);
> +
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_selftest(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	dev = &rte_dmadevices[dev_id];
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
> +	return (*dev->dev_ops->dev_selftest)(dev_id);
> +}


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-13 12:27 ` [dpdk-dev] [PATCH v3] " Chengwen Feng
  2021-07-13 13:06   ` fengchengwen
  2021-07-13 16:02   ` Bruce Richardson
@ 2021-07-14 12:22   ` Nipun Gupta
  2021-07-15  8:29     ` fengchengwen
  2021-07-14 16:05   ` Bruce Richardson
  2021-07-15  7:10   ` Jerin Jacob
  4 siblings, 1 reply; 339+ messages in thread
From: Nipun Gupta @ 2021-07-14 12:22 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, Hemant Agrawal, maxime.coquelin, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev,
	Gagandeep Singh

<snip>

> +/**
> + * A structure used to configure a virtual DMA channel.
> + */
> +struct rte_dmadev_vchan_conf {
> +	uint8_t direction;
> +	/**< Set of supported transfer directions
> +	 * @see RTE_DMA_MEM_TO_MEM
> +	 * @see RTE_DMA_MEM_TO_DEV
> +	 * @see RTE_DMA_DEV_TO_MEM
> +	 * @see RTE_DMA_DEV_TO_DEV
> +	 */
> +	/** Number of descriptor for the virtual DMA channel */
> +	uint16_t nb_desc;
> +	/** 1) Used to describe the port parameter in the device-to-memory
> +	 * transfer scenario.
> +	 * 2) Used to describe the source port parameter in the
> +	 * device-to-device transfer scenario.
> +	 * @see struct rte_dmadev_port_parameters
> +	 */

There should also be a configuration to support no response (per virtual channel),
and if that is enabled, the user will not be required to call the 'rte_dmadev_completed' API.
This should also be part of the capability flags.

> +	struct rte_dmadev_port_parameters src_port;
> +	/** 1) Used to describe the port parameter in the memory-to-device
> +	 * transfer scenario.
> +	 * 2) Used to describe the destination port parameter in the
> +	 * device-to-device transfer scenario.
> +	 * @see struct rte_dmadev_port_parameters
> +	 */
> +	struct rte_dmadev_port_parameters dst_port;
> +};
> +

<snip>

> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter list copy operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.

This would need updating for the submit flag.
The statement should be true only when the flag is set?
I see a similar comment applies to the 'rte_dmadev_copy_sg' and 'rte_dma_fill' APIs.

> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param flags
> + *   The flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
> + *   - <0: Error code returned by the driver copy scatterlist function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg
> *sg,
> +		   uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> +	    vchan >= dev->data->dev_conf.max_vchans ||
> +	    sg == NULL)
> +		return -EINVAL;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> +#endif
> +	return (*dev->copy_sg)(dev, vchan, sg, flags);
> +}
> +


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-13 12:27 ` [dpdk-dev] [PATCH v3] " Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-07-14 12:22   ` Nipun Gupta
@ 2021-07-14 16:05   ` Bruce Richardson
  2021-07-15  7:10   ` Jerin Jacob
  4 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-14 16:05 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Tue, Jul 13, 2021 at 08:27:43PM +0800, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

More review comments - mostly stylistic - inline below.

/Bruce

> ---
> v3:
> * rm reset and fill_sg ops.
> * rm MT-safe capabilities.
> * add submit flag.
> * redefine rte_dma_sg to implement asymmetric copy.
> * delete some reserved field for future use.
> * rearrangement rte_dmadev/rte_dmadev_data struct.
> * refresh rte_dmadev.h copyright.
> * update vchan setup parameter.
> * modified some inappropriate descriptions.
> * arrange version.map alphabetically.
> * other minor modifications from review comment.
> ---
<snip>

> +
> +#include <rte_common.h>
> +#include <rte_compat.h>
> +#ifdef RTE_DMADEV_DEBUG
> +#include <rte_dev.h>
> +#endif

I don't see the value in conditionally including this. I'd simplify by just
always including it.

> +#include <rte_errno.h>
> +#include <rte_memory.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * @param dev_id
> + *   DMA device index.
> + *
> + * @return
> + *   - If the device index is valid (true) or not (false).
> + */
> +__rte_experimental
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the total number of DMA devices that have been successfully
> + * initialised.
> + *
> + * @return
> + *   The total number of usable DMA devices.
> + */
> +__rte_experimental
> +uint16_t
> +rte_dmadev_count(void);
> +
> +/**
> + * The capabilities of a DMA device
> + */
This should be a non-doxygen comment, as it doesn't apply to a code
element.

> +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
> +/**< DMA device support memory-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
These comments should come before the items they refer to, not after.

> +#define RTE_DMA_DEV_CAPA_MEM_TO_DEV	(1ull << 1)
> +/**< DMA device support memory-to-device transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_DEV_TO_MEM	(1ull << 2)
> +/**< DMA device support device-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_DEV_TO_DEV	(1ull << 3)
> +/**< DMA device support device-to-device transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_COPY	(1ull << 4)

Do we want to leave gaps in the flags so that they are grouped by op type?
Is it possible that we might have more RTE_DMA_DEV_X_TO_Y flags in future,
because if so, we should move this out to bit 8, for example.

> +/**< DMA device support copy ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_FILL	(1ull << 5)
> +/**< DMA device support fill ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_SG		(1ull << 6)
> +/**< DMA device support scatter-list ops.
> + * If device support ops_copy and ops_sg, it means supporting copy_sg ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */

Rather than a general SG flag, this should probably be for SG_COPY, since
we aren't offering an SG_FILL option.

> +#define RTE_DMA_DEV_CAPA_FENCE		(1ull << 7)
> +/**< DMA device support fence.
> + * If device support fence, then application could set a fence flags when
> + * enqueue operation by rte_dma_copy/copy_sg/fill/fill_sg.
> + * If a operation has a fence flags, it means the operation must be processed
> + * only after all previous operations are completed.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */

Drop this flag as unnecessary. All devices either always provide an ordering
guarantee - in which case it's a no-op - or else support the flag.

> +#define RTE_DMA_DEV_CAPA_SVA		(1ull << 8)

Again, if we are ok to leave gaps, I'd suggest moving this one well down,
e.g. to bit 32.
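
Pulling these suggestions together (op types from bit 8, SG renamed to
SG_COPY, FENCE dropped, SVA at bit 32), the layout might end up
something like this - illustrative values only:

#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0) /* directions: bits 0-7 */
#define RTE_DMA_DEV_CAPA_MEM_TO_DEV	(1ull << 1)
#define RTE_DMA_DEV_CAPA_DEV_TO_MEM	(1ull << 2)
#define RTE_DMA_DEV_CAPA_DEV_TO_DEV	(1ull << 3)
#define RTE_DMA_DEV_CAPA_OPS_COPY	(1ull << 8) /* op types: bits 8+ */
#define RTE_DMA_DEV_CAPA_OPS_FILL	(1ull << 9)
#define RTE_DMA_DEV_CAPA_OPS_SG_COPY	(1ull << 10)
#define RTE_DMA_DEV_CAPA_SVA		(1ull << 32) /* misc: bits 32+ */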

> +/**< DMA device support SVA which could use VA as DMA address.
> + * If device support SVA then application could pass any VA address like memory
> + * from rte_malloc(), rte_memzone(), malloc, stack memory.
> + * If device don't support SVA, then application should pass IOVA address which
> + * from rte_malloc(), rte_memzone().
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +/**
> + * A structure used to retrieve the contextual information of
> + * an DMA device
> + */
> +struct rte_dmadev_info {
> +	struct rte_device *device; /**< Generic Device information */
> +	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_*) */
> +	/** Maximum number of virtual DMA channels supported */
> +	uint16_t max_vchans;
> +	/** Maximum allowed number of virtual DMA channel descriptors */
> +	uint16_t max_desc;
> +	/** Minimum allowed number of virtual DMA channel descriptors */
> +	uint16_t min_desc;
> +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured */
> +};

Minor nit - I suggest standardizing the comment format here and have them
all either before, or all afterwards. Since they won't all fit in your
80-column limit, make all comments appear before the item.

> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve the contextual information of a DMA device.

Suggest shortening to "Retrieve information about a DMA device". There is
no context info provided here.

> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param[out] dev_info
> + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> + *   contextual information of the device.

I'd drop the word "contextual" here too.

> + *
> + * @return
> + *   - =0: Success, driver updates the contextual information of the DMA device
> + *   - <0: Error code returned by the driver info get function.
> + *
> + */
> +__rte_experimental
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
<snip>
> +
> +/**
> + * DMA transfer direction defines.
> + */
> +#define RTE_DMA_MEM_TO_MEM	(1ull << 0)
> +/**< DMA transfer direction - from memory to memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */

As with other bit flags, please put the comments on top.

> +#define RTE_DMA_MEM_TO_DEV	(1ull << 1)
> +/**< DMA transfer direction - from memory to device.
> + * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs
> + * through the PCIE interface. In this case, the ARM SoCs works in EP(endpoint)
> + * mode, it could initiate a DMA move request from memory (which is ARM memory)
> + * to device (which is x86 host memory).
<snip>
> +/**
> + *  DMA flags to augment operation preparation.
> + *  Used as the 'flags' parameter of rte_dmadev_copy/fill.
> + */
> +#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
> +/**< DMA fence flag
> + * It means the operation with this flag must be processed only after all
> + * previous operations are completed.
> + *
> + * @see rte_dmadev_copy()
> + * @see rte_dmadev_copy_sg()
> + * @see rte_dmadev_fill()
> + */
> +#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
> +/**< DMA submit flag
> + * It means the operation with this flag must issue doorbell to hardware after
> + * enqueued jobs.
> + */

Comments before define.

> +
> +/**
<snip>
> +/**
> + * DMA transfer status code defines
> + */
> +enum rte_dma_status_code {
> +	/** The operation completed successfully */
> +	RTE_DMA_STATUS_SUCCESSFUL = 0,
> +	/** The operation failed to complete due active drop
> +	 * This is mainly used when processing dev_stop, allow outstanding
> +	 * requests to be completed as much as possible.
> +	 */
> +	RTE_DMA_STATUS_ACTIVE_DROP,

Is this saying that the operation is aborted? I'm not familiar with the
phrase "active drop".

> +	/** The operation failed to complete due invalid source address */
> +	RTE_DMA_STATUS_INVALID_SRC_ADDR,
> +	/** The operation failed to complete due invalid destination address */
> +	RTE_DMA_STATUS_INVALID_DST_ADDR,
> +	/** The operation failed to complete due invalid length */
> +	RTE_DMA_STATUS_INVALID_LENGTH,
> +	/** The operation failed to complete due invalid opcode
> +	 * The DMA descriptor could have multiple format, which are
> +	 * distinguished by the opcode field.
> +	 */
> +	RTE_DMA_STATUS_INVALID_OPCODE,
> +	/** The operation failed to complete due bus err */
> +	RTE_DMA_STATUS_BUS_ERROR,
> +	/** The operation failed to complete due data poison */
> +	RTE_DMA_STATUS_DATA_POISION,
> +	/** The operation failed to complete due descriptor read error */
> +	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
> +	/** The operation failed to complete due device link error
> +	 * Used to indicates that the link error in the mem-to-dev/dev-to-mem/
> +	 * dev-to-dev transfer scenario.
> +	 */
> +	RTE_DMA_STATUS_DEV_LINK_ERROR,
> +	/** The operation failed to complete due unknown reason */
> +	RTE_DMA_STATUS_UNKNOWN,
> +	/** Driver specific status code offset
> +	 * Start status code for the driver to define its own error code.
> +	 */
> +	RTE_DMA_STATUS_DRV_SPECIFIC_OFFSET = 0x10000,
> +};

I think we need a status error code for "not attempted", where jobs in a
particular batch are not attempted because they appeared after a fence
where a previous job failed. In our HW implementation it's possible that
jobs from later batches would be completed, though, so we need to report
the status of the not-attempted jobs before reporting those newer
completed jobs.
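
For example, an extra enum entry (name hypothetical):

	/** The operation was not attempted, e.g. because an earlier
	 * operation failed ahead of a fence; reported in order, before
	 * the status of any later jobs that did complete. */
	RTE_DMA_STATUS_NOT_ATTEMPTED,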

> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that failed to complete.
> + * NOTE: This API was used when rte_dmadev_completed has_error was set.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_status
> + *   Indicates the size of status array.
> + * @param[out] status
> + *   The error code of operations that failed to complete.
> + *   Some standard error code are described in 'enum rte_dma_status_code'
> + *   @see rte_dma_status_code
> + * @param[out] last_idx
> + *   The last failed completed operation's index.
> + *
> + * @return
> + *   The number of operations that failed to complete.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
> +			   const uint16_t nb_status, uint32_t *status,
> +			   uint16_t *last_idx)
> +{

Switch the final two parameters around, so that the prototype matches that
of the previous completed() function, i.e. all start with dev_id, vchan,
"count", last_idx, and then only differ in the final parameter.

> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> +	    vchan >= dev->data->dev_conf.max_vchans ||
> +	    nb_status == 0 ||
> +	    status == NULL ||
> +	    last_idx == NULL)
> +		return -EINVAL;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_fails, -ENOTSUP);
> +#endif

Unlike "completed" there is no fallback assigning to non-null parameters.
If we want to make the final two parameters mandatory, we should document
this.

> +	return (*dev->completed_fails)(dev, vchan, nb_status, status, last_idx);
> +}
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_H_ */
> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> new file mode 100644
> index 0000000..b0b6494
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -0,0 +1,161 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#ifndef _RTE_DMADEV_CORE_H_
> +#define _RTE_DMADEV_CORE_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device internal header.
> + *
> + * This header contains internal data types, that are used by the DMA devices
> + * in order to expose their ops to the class.
> + *
> + * Applications should not use these API directly.
> + *
> + */
> +
> +struct rte_dmadev;
> +
> +/** @internal Used to get device information of a device. */
> +typedef int (*dmadev_info_get_t)(const struct rte_dmadev *dev,
> +				 struct rte_dmadev_info *dev_info,
> +				 uint32_t info_sz);
> +

Since rte_dmadev_core.h is included in rte_dmadev.h, these will be in the
public namespace for all apps using dmadev, so they do need the "rte_"
prefix.

> +/** @internal Used to configure a device. */
> +typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
> +				  const struct rte_dmadev_conf *dev_conf);
<snip>
> +/**
> + * @internal
> + * The generic data structure associated with each DMA device.
> + *
> + * The dataplane APIs are located at the beginning of the structure, along
> + * with the pointer to where all the data elements for the particular device
> + * are stored in shared memory. This split scheme allows the function pointer
> + * and driver data to be per-process, while the actual configuration data for
> + * the device is shared.
> + */
> +struct rte_dmadev {
> +	dmadev_copy_t copy;
> +	dmadev_copy_sg_t copy_sg;
> +	dmadev_fill_t fill;
> +	dmadev_submit_t submit;
> +	dmadev_completed_t completed;
> +	dmadev_completed_fails_t completed_fails;
> +	void *reserved_ptr; /**< Reserved for future IO function */
> +	struct rte_dmadev_data *data; /**< Pointer to device data. */
> +

I think we will get better performance if we move this back down the
struct, and instead put a copy of the data->dev_private pointer in its
place. Driver implementations tend to use the private data much more than
the generic public data struct.
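
i.e. roughly this, as a sketch of just the layout change:

struct rte_dmadev {
	dmadev_copy_t copy;
	dmadev_copy_sg_t copy_sg;
	dmadev_fill_t fill;
	dmadev_submit_t submit;
	dmadev_completed_t completed;
	dmadev_completed_fails_t completed_fails;
	void *reserved_ptr; /**< Reserved for future IO function */
	/** Flat copy of data->dev_private: drivers touch it on every
	 * datapath call, so keep it alongside the function pointers. */
	void *dev_private;

	/* non-datapath fields, including the data pointer, moved down */
	struct rte_dmadev_data *data;
	const struct rte_dmadev_ops *dev_ops;
	struct rte_device *device;
	enum rte_dmadev_state state;
	uint64_t reserved[2];
} __rte_cache_aligned;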

> +	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
> +	/** Device info which supplied during device initialization. */
> +	struct rte_device *device;
> +	enum rte_dmadev_state state; /**< Flag indicating the device state */
> +	uint64_t reserved[2]; /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +extern struct rte_dmadev rte_dmadevices[];
> +
> +#endif /* _RTE_DMADEV_CORE_H_ */
> diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h

<snip>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-13 13:37     ` Bruce Richardson
@ 2021-07-15  6:44       ` Jerin Jacob
  2021-07-15  8:25         ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-15  6:44 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Tue, Jul 13, 2021 at 7:08 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Tue, Jul 13, 2021 at 09:06:39PM +0800, fengchengwen wrote:
> > Thank you for your valuable comments, and I think we've taken a big step forward.
> >
> > @andrew Could you provide the copyright line so that I can add it to relevant file.
> >
> > @burce, jerin  Some unmodified review comments are returned here:
>
> Thanks. Some further comments inline below. Most points you make I'm ok
> with, but I do disagree on a number of others.
>
> /Bruce
>
> >
> > 1.
> > COMMENT: We allow up to 100 characters per line for DPDK code, so these don't need
> > to be wrapped so aggressively.
> >
> > REPLY: Our CI still has an 80-character limit, and I see most frameworks still comply.
> >
> Ok.
>
> > 2.
> > COMMENT: > +#define RTE_DMA_MEM_TO_MEM     (1ull << 0)
> > RTE_DMA_DIRECTION_...
> >
> > REPLY: adding 'DIRECTION' may make the macro too long; I prefer to keep it simple.
> >
> DIRECTION could be shortened to DIR, but I think this is probably ok as is
> too.
>

I prefer to keep DIR so that it is easy to reference in documentation,
like @see RTE_DMA_DIR_*


> > 3.
> > COMMENT: > +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
> > We are not making release as pubic API in other device class. See ethdev spec.
> > bbdev/eventdev/rawdev
> >
> > REPLY: because ethdev's queues are hardware queues, while here we have software-defined channels,
> > I think release is OK. BTW: bbdev/eventdev also have release ops.

I don't see any API like rte_event_queue_release() in eventdev. It
only has setup.

Typical flow is
1) configure() the N vchan
2) for i..N setup() the chan
3) start()
4) stop()
5) configure again with M vchan
6) for i..M setup() the chan
7) start()

And the above is documented at the beginning of the rte_dmadev.h header file.
I think the above sequence makes it easy for drivers. Just like in other
device classes, _release can be a PMD hook which will be handled in the
common configure() code.
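
As a rough sketch of that sequence from the application side (using the
v3 API names; error handling and the vchan configuration contents are
omitted):

static void
reconfigure(uint16_t dev_id, struct rte_dmadev_vchan_conf *vc,
	    uint16_t n, uint16_t m)
{
	struct rte_dmadev_conf conf = { .max_vchans = n };
	uint16_t i;

	rte_dmadev_configure(dev_id, &conf);
	for (i = 0; i < n; i++)
		rte_dmadev_vchan_setup(dev_id, &vc[i]);
	rte_dmadev_start(dev_id);
	/* ... datapath work ... */
	rte_dmadev_stop(dev_id);

	conf.max_vchans = m;	/* reconfigure with M vchans */
	rte_dmadev_configure(dev_id, &conf);
	for (i = 0; i < m; i++)
		rte_dmadev_vchan_setup(dev_id, &vc[i]);
	rte_dmadev_start(dev_id);
}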



> >
> Ok


> > 4.  COMMENT:> +       uint64_t reserved[4]; /**< Reserved for future
> > fields */
> > > +};
> > Please add the capability for each counter in info structure as one
> > device may support all the counters.
> >
> > REPLY: This is a statistics function. If this function is not supported,
> > there is no need to implement the stats ops function. The unimplemented
> > counters could also be set to zero.
> >
> +1
> The stats functions should be a minimum set that is supported by all
> drivers. Each of these stats can be easily tracked by software if HW
> support for it is not available, so I agree that we should not have each
> stat as a capability.

In our current HW, submitted_count and completed_count are offloaded to HW.
In addition to that, we have a provision for getting stats for bytes
copied. (We can make it an xstat if other drivers won't support it.)

Our plan is to track enqueued_count and completed_fail_count in SW under
conditional compilation flags or another scheme, as it is in the fastpath.

If we are not planning to add a capability, IMO, we need to update the
documentation to say that unimplemented counters will return zero. But then
there is the question of how to differentiate an unimplemented counter from
a genuine zero value. IMO, we can either update the doc for this case as
well or add a capability.


>
> > 5.
> > COMMENT: > +#endif
> > > +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> > Instead of every driver set the NOP function, In the common code, If
> > the CAPA is not set,
> > common code can set NOP function for this with <0 return value.
> >
> > REPLY: I don't think it's a good idea to judge in the IO path; it's the application's duty
> > to ensure it doesn't call an API the driver doesn't support (which it can learn from the capabilities).
> >
> For datapath functions, +1.

OK. Probably add some NOP function (returning an error) in pmd.h so
that all drivers can reuse it.
No strong opinion.
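
Something like this, for example - a sketch matching the fill op's
signature in the patch:

static int
dmadev_fill_unsupported(struct rte_dmadev *dev, uint16_t vchan,
			uint64_t pattern, rte_iova_t dst,
			uint32_t length, uint64_t flags)
{
	/* silence unused-parameter warnings in the shared stub */
	RTE_SET_USED(dev);
	RTE_SET_USED(vchan);
	RTE_SET_USED(pattern);
	RTE_SET_USED(dst);
	RTE_SET_USED(length);
	RTE_SET_USED(flags);
	return -ENOTSUP;
}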

>
> > 6.
> > COMMENT: > +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
> > > +                          const uint16_t nb_status, uint32_t *status,
> > uint32_t -> enum rte_dma_status_code
> >
> > REPLY: I'm still evaluating this. It takes a long time for the driver to perform error code
> > conversion in this API. Do we need to provide a separate error code conversion function?
> >
> It's not that difficult a conversion to do, and so long as we have the
> regular "completed" function which doesn't do all the error manipulation we
> should be fine. Performance in the case of errors is not expected to be as
> good, since errors should be very rare.

+1

>
> > 7.
> > COMMENT: > +typedef int (*dmadev_info_get_t)(struct rte_dmadev *dev,
> > > +                                struct rte_dmadev_info *dev_info);
> > Please change to rte_dmadev_info_get_t to avoid conflict due to namespace issue
> > as this header is exported.
> >
> > REPLY: I prefer not to add the 'rte_' prefix; it makes the define too long.
> >
> I disagree on this, they need the rte_ prefix, despite the fact it makes
> them longer. If length is a concern, these can be changed from "dmadev_" to
> "rte_dma_", which is only one character longer.
> In fact, I believe Morten already suggested we use "rte_dma" rather than
> "rte_dmadev" as a function prefix across the library.

+1

>
> > 8.
> > COMMENT: > + *        - rte_dmadev_completed_fails()
> > > + *            - return the number of operation requests failed to complete.
> > Please rename this to "completed_status" to allow the return of information
> > other than just errors. As I suggested before, I think this should also be
> > usable as a slower version of "completed" even in the case where there are
> > no errors, in that it returns status information for each and every job
> > rather than just returning as soon as it hits a failure.
> >
> > REPLY: well, I think it may be confusing (the current OK/FAIL API is easy to understand),
> > and we can build the slow-path function on top of the two APIs.
> >
> I still disagree on this too. We have a "completed" op where we get
> informed of what has completed and minimal error indication, and a
> "completed_status" operation which provides status information for each
> operation completed, at the cost of speed.

+1

>
> > 9.
> > COMMENT: > +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM       (1ull << 0)
> > > +/**< DMA device support mem-to-mem transfer.
> > Do we need this? Can we assume that any device appearing as a dmadev can
> > do mem-to-mem copies, and drop the capability for mem-to-mem and the
> > capability for copying?
> > also for RTE_DMA_DEV_CAPA_OPS_COPY
> >
> > REPLY: yes, I insist on adding this for the sake of conceptual integrity.
> > For the ioat driver, just make a statement.
> >
>
> Ok. It seems a wasted bit to me, but I don't see us running out of them
> soon.
>
> > 10.
> > COMMENT: > +  uint16_t nb_vchans; /**< Number of virtual DMA channel configured */
> > > +};
> > Let's add rte_dmadev_conf struct into this to return the configuration
> > settings.
> >
> > REPLY: If we add rte_dmadev_conf in, it may break ABI when rte_dmadev_conf adds fields.
> >
> Yes, that is true, but I fail to see why that is a major problem. It just
> means that if the conf structure changes we have two functions to version
> instead of one. The information is still useful.
>
> If you don't want the actual conf structure explicitly put into the info
> struct, we can instead put the fields in directly. I really think that the
> info_get function should provide back to the user the details of how
> the device was configured previously.
>
> regards,
> /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-13 12:27 ` [dpdk-dev] [PATCH v3] " Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-07-14 16:05   ` Bruce Richardson
@ 2021-07-15  7:10   ` Jerin Jacob
  2021-07-15  9:03     ` Bruce Richardson
  4 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-15  7:10 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin


On Tue, Jul 13, 2021 at 6:01 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
>
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

Thanks for v3. Seems like all major items are covered. Some more
comments below inline.

I would suggest splitting the v4 patch as follows (so that we can review and
ack each patch):
1) Only the public header file with Doxygen inclusion (there are a lot of
Doxygen syntax issues in the patch)
2) One or more patches for the implementation.


> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..f6cc4e5
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,968 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + * Copyright(c) 2021 Marvell International Ltd.
> + * Copyright(c) 2021 SmartShare Systems.
> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The DMA framework is built on the following model:
> + *
> + *     ---------------   ---------------       ---------------
> + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> + *     | channel     |   | channel     |       | channel     |
> + *     ---------------   ---------------       ---------------
> + *            |                |                      |
> + *            ------------------                      |
> + *                     |                              |
> + *               ------------                    ------------
> + *               |  dmadev  |                    |  dmadev  |
> + *               ------------                    ------------
> + *                     |                              |
> + *            ------------------               ------------------
> + *            | HW-DMA-channel |               | HW-DMA-channel |
> + *            ------------------               ------------------
> + *                     |                              |
> + *                     --------------------------------
> + *                                     |
> + *                           ---------------------
> + *                           | HW-DMA-Controller |
> + *                           ---------------------
> + *
> + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
> + * each HW-DMA-channel should be represented by a dmadev.
> + *
> + * The dmadev could create multiple virtual DMA channel, each virtual DMA
> + * channel represents a different transfer context. The DMA operation request
> + * must be submitted to the virtual DMA channel.
> + * E.G. Application could create virtual DMA channel 0 for mem-to-mem transfer
> + *      scenario, and create virtual DMA channel 1 for mem-to-dev transfer
> + *      scenario.
> + *
> + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
> + * PCI/SoC device probing phase performed at EAL initialization time. And could
> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> + * phase.
> + *
> + * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
> + * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> + *
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_vchan_setup()
> + *     - rte_dmadev_start()
> + *
> + * Then, the application can invoke dataplane APIs to process jobs.
> + *
> + * If the application wants to change the configuration (i.e. call
> + * rte_dmadev_configure()), it must call rte_dmadev_stop() first to stop the
> + * device and then do the reconfiguration before calling rte_dmadev_start()
> + * again. The dataplane APIs should not be invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the
> + * rte_dmadev_close() function.
> + *
> + * The dataplane APIs include two parts:
> + *   a) The first part is the submission of operation requests:
> + *        - rte_dmadev_copy()
> + *        - rte_dmadev_copy_sg() - scatter-gather form of copy
> + *        - rte_dmadev_fill()
> + *        - rte_dmadev_fill_sg() - scatter-gather form of fill

rte_dmadev_fill_sg was already removed.


> + *        - rte_dmadev_perform() - issue doorbell to hardware
> + *      These APIs could work with different virtual DMA channels which have
> + *      different contexts.

Please mention the SUBMIT flag option as well.


> + *      The first four APIs are used to submit the operation request to the
> + *      virtual DMA channel, if the submission is successful, a uint16_t
> + *      ring_idx is returned, otherwise a negative number is returned.
> + *   b) The second part is to obtain the result of requests:
> + *        - rte_dmadev_completed()
> + *            - return the number of operation requests completed successfully.
> + *        - rte_dmadev_completed_fails()
> + *            - return the number of operation requests failed to complete.
> + *
> + * About the ring_idx returned by rte_dmadev_copy/copy_sg/fill/fill_sg(),
> + * the rules are as follows:
> + *   a) ring_idx for each virtual DMA channel are independent.
> + *   b) For a virtual DMA channel, the ring_idx is monotonically incremented;
> + *      when it reaches UINT16_MAX, it wraps back to zero.
> + *   c) This ring_idx can be used by applications to track per-operation
> + *      metadata in an application-defined circular ring.
> + *   d) The initial ring_idx of a virtual DMA channel is zero; after the
> + *      device is stopped, the ring_idx is reset to zero.
> + *   Example:
> + *      step-1: start one dmadev
> + *      step-2: enqueue a copy operation, the ring_idx return is 0
> + *      step-3: enqueue a copy operation again, the ring_idx return is 1
> + *      ...
> + *      step-101: stop the dmadev
> + *      step-102: start the dmadev
> + *      step-103: enqueue a copy operation, the ring_idx return is 0
> + *      ...
> + *      step-x+0: enqueue a fill operation, the ring_idx return is 65535
> + *      step-x+1: enqueue a copy operation, the ring_idx return is 0
> + *      ...
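
To illustrate rule c) above, here is a minimal sketch of an application
tracking per-operation metadata via the wrapping ring_idx (struct op_meta
and cur_meta are illustrative placeholders, not part of the API; the ring
size must be a power of two so that it divides UINT16_MAX + 1 evenly):

#define META_RING_SZ 1024 /* power of two, divides 65536 evenly */
struct op_meta meta_ring[META_RING_SZ];

int idx = rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
if (idx >= 0)
	meta_ring[idx & (META_RING_SZ - 1)] = cur_meta;
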
> + *
> + * By default, all the functions of the dmadev API exported by a PMD are
> + * lock-free functions which are assumed not to be invoked in parallel from
> + * different logical cores on the same target object.
> + *
> + */
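
Putting the above lifecycle together, a minimal usage sketch based on the
prototypes in this header (error handling elided; dev_id, src, dst and len
are placeholders):

struct rte_dmadev_conf conf = { .max_vchans = 1 };
struct rte_dmadev_vchan_conf vconf = {
	.direction = RTE_DMA_MEM_TO_MEM,
	.nb_desc = 1024,
};

rte_dmadev_configure(dev_id, &conf);
int vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
rte_dmadev_start(dev_id);

/* enqueue a copy and ring the doorbell in one call */
rte_dmadev_copy(dev_id, vchan, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);

/* poll for completions */
uint16_t last_idx;
bool has_error;
uint16_t n = rte_dmadev_completed(dev_id, vchan, 8, &last_idx, &has_error);

rte_dmadev_stop(dev_id);
rte_dmadev_close(dev_id);
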
> +
> +#include <rte_common.h>
> +#include <rte_compat.h>
> +#ifdef RTE_DMADEV_DEBUG
> +#include <rte_dev.h>
> +#endif
> +#include <rte_errno.h>
> +#include <rte_memory.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#define RTE_DMADEV_NAME_MAX_LEN        RTE_DEV_NAME_MAX_LEN
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * @param dev_id
> + *   DMA device index.
> + *
> + * @return
> + *   - true if the device index is valid, false otherwise.
> + */
> +__rte_experimental
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the total number of DMA devices that have been successfully
> + * initialised.
> + *
> + * @return
> + *   The total number of usable DMA devices.
> + */
> +__rte_experimental
> +uint16_t
> +rte_dmadev_count(void);
> +
> +/**
> + * The capabilities of a DMA device
> + */
> +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM    (1ull << 0)
> +/**< DMA device supports memory-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_MEM_TO_DEV    (1ull << 1)
> +/**< DMA device supports memory-to-device transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_DEV_TO_MEM    (1ull << 2)
> +/**< DMA device supports device-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_DEV_TO_DEV    (1ull << 3)
> +/**< DMA device supports device-to-device transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_COPY      (1ull << 4)
> +/**< DMA device supports copy ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_FILL      (1ull << 5)
> +/**< DMA device supports fill ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_OPS_SG                (1ull << 6)
> +/**< DMA device supports scatter-gather ops.
> + * If a device supports both OPS_COPY and OPS_SG, it supports copy_sg ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_FENCE         (1ull << 7)
> +/**< DMA device supports fence.
> + * If a device supports fence, the application can set a fence flag when
> + * enqueuing an operation via rte_dma_copy/copy_sg/fill/fill_sg.
> + * An operation with a fence flag must be processed only after all previous
> + * operations have completed.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +#define RTE_DMA_DEV_CAPA_SVA           (1ull << 8)
> +/**< DMA device supports SVA, which allows using a VA as the DMA address.
> + * If a device supports SVA, the application can pass any VA, e.g. memory
> + * from rte_malloc(), rte_memzone(), malloc(), or the stack.
> + * If a device does not support SVA, the application must pass an IOVA
> + * obtained from rte_malloc() or rte_memzone().
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
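
As an illustration, applications would typically test these bits against
the capabilities reported via the info API declared below (a sketch;
dev_id is a placeholder):

struct rte_dmadev_info info;

rte_dmadev_info_get(dev_id, &info);
if (!(info.dev_capa & RTE_DMA_DEV_CAPA_OPS_COPY))
	rte_exit(EXIT_FAILURE, "device does not support copy ops\n");
if (info.dev_capa & RTE_DMA_DEV_CAPA_SVA)
	printf("device accepts plain virtual addresses\n");
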
> +
> +/**
> + * A structure used to retrieve the contextual information of
> + * a DMA device.
> + */
> +struct rte_dmadev_info {
> +       struct rte_device *device; /**< Generic Device information */
> +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_*) */
> +       /** Maximum number of virtual DMA channels supported */
> +       uint16_t max_vchans;
> +       /** Maximum allowed number of virtual DMA channel descriptors */
> +       uint16_t max_desc;
> +       /** Minimum allowed number of virtual DMA channel descriptors */
> +       uint16_t min_desc;
> +       uint16_t nb_vchans; /**< Number of virtual DMA channels configured */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve the contextual information of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param[out] dev_info
> + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> + *   contextual information of the device.
> + *
> + * @return
> + *   - =0: Success, driver updates the contextual information of the DMA device
> + *   - <0: Error code returned by the driver info get function.
> + *
> + */
> +__rte_experimental
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> +
> +/**
> + * A structure used to configure a DMA device.
> + */
> +struct rte_dmadev_conf {
> +       /** Maximum number of virtual DMA channels to use.
> +        * This value cannot be greater than the field 'max_vchans' of struct
> +        * rte_dmadev_info obtained from rte_dmadev_info_get().
> +        */
> +       uint16_t max_vchans;
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Configure a DMA device.
> + *
> + * This function must be invoked first before any other function in the
> + * API. This function can also be re-invoked when a device is in the
> + * stopped state.
> + *
> + * @param dev_id
> + *   The identifier of the device to configure.
> + * @param dev_conf
> + *   The DMA device configuration structure encapsulated into rte_dmadev_conf
> + *   object.
> + *
> + * @return
> + *   - =0: Success, device configured.
> + *   - <0: Error code returned by the driver configuration function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Start a DMA device.
> + *
> + * The device start step is the last one and consists of setting the DMA
> + * to start accepting jobs.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device started.
> + *   - <0: Error code returned by the driver start function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_start(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Stop a DMA device.
> + *
> + * The device can be restarted with a call to rte_dmadev_start()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device stopped.
> + *   - <0: Error code returned by the driver stop function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stop(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Close a DMA device.
> + *
> + * The device cannot be restarted after this call.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *  - =0: Successfully close device
> + *  - <0: Failure to close device
> + */
> +__rte_experimental
> +int
> +rte_dmadev_close(uint16_t dev_id);
> +
> +/**
> + * DMA transfer direction defines.
> + */
> +#define RTE_DMA_MEM_TO_MEM     (1ull << 0)

RTE_DMA_DIR_MEM_TO_MEM


> +/**< DMA transfer direction - from memory to memory.
> + *
> + * @see struct rte_dmadev_vchan_conf::direction
> + */
> +#define RTE_DMA_MEM_TO_DEV     (1ull << 1)
> +/**< DMA transfer direction - from memory to device.
> + * In a typical scenario, an ARM SoC is installed in an x86 server as an iNIC
> + * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
> + * mode and can initiate a DMA move request from memory (the ARM memory)
> + * to device (the x86 host memory).
> + *
> + * @see struct rte_dmadev_vchan_conf::direction

Also point to rte_dmadev_port_parameters::port_type.

> + */
> +#define RTE_DMA_DEV_TO_MEM     (1ull << 2)
> +/**< DMA transfer direction - from device to memory.
> + * In a typical scenario, an ARM SoC is installed in an x86 server as an iNIC
> + * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
> + * mode and can initiate a DMA move request from device (the x86 host
> + * memory) to memory (the ARM memory).
> + *
> + * @see struct rte_dmadev_vchan_conf::direction

Also point to rte_dmadev_port_parameters::port_type.

> + */
> +#define RTE_DMA_DEV_TO_DEV     (1ull << 3)
> +/**< DMA transfer direction - from device to device.
> + * In a typical scenario, an ARM SoC is installed in an x86 server as an iNIC
> + * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
> + * mode and can initiate a DMA move request from device (one x86 host
> + * memory) to device (another x86 host memory).
> + *
> + * @see struct rte_dmadev_vchan_conf::direction

Also point to rte_dmadev_port_parameters::port_type.

> + */
> +#define RTE_DMA_TRANSFER_DIR_ALL       (RTE_DMA_MEM_TO_MEM | \
> +                                        RTE_DMA_MEM_TO_DEV | \
> +                                        RTE_DMA_DEV_TO_MEM | \
> +                                        RTE_DMA_DEV_TO_DEV)

RTE_DMA_DIR_ALL ??

> +
> +/**
> + * enum rte_dmadev_port_type - DMA port type defines
> + */
> +enum rte_dmadev_port_type {
> +       /** The device port type is PCIE. */
> +       RTE_DMADEV_PORT_OF_PCIE = 1,

Is 'OF' required? RTE_DMADEV_PORT_PCIE

> +};
> +
> +/**
> + * A structure used to describe DMA port parameters.
> + */
> +struct rte_dmadev_port_parameters {

Please name this 'param' or 'params', e.g. rte_dmadev_port_param.


> +       enum rte_dmadev_port_type port_type;
Missing doxygen comment for this.
> +       union {
> +               /** For PCIE port
> +                *
> +                * The following model shows a SoC's PCIE module connecting to
> +                * multiple PCIE hosts and multiple endpoints. The PCIE module
> +                * has an integrated DMA controller.
> +                * If the DMA needs to access the memory of host A, the access
> +                * can be initiated by PF-1 in Core0, or by VF-0 of PF-0 in Core0.
> +                *
> +                * System Bus
> +                *    |     ----------PCIE module----------
> +                *    |     Bus
> +                *    |     Interface
> +                *    |     -----        ------------------
> +                *    |     |   |        | PCIE Core0     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
> +                *    |     |   |--------|        |- VF-1 |--------| Root    |
> +                *    |     |   |        |   PF-1         |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |   |        | PCIE Core1     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
> +                *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
> +                *    |     |   |        |        |- VF-1 |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |DMA|        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |--------| PCIE Core2     |        ------
> +                *    |     |   |        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |        |                |        ------
> +                *    |     -----        ------------------
> +                *
> +                * The following structure is used to describe the above access
> +                * port.
> +                */
> +               struct {
> +                       uint64_t coreid : 3; /**< PCIE core id used */
> +                       uint64_t pfid : 6; /**< PF id used */
> +                       uint64_t vfen : 1; /**< VF enable bit */
> +                       uint64_t vfid : 8; /**< VF id used */

We support up to 12 bits, so please make this field 12 bits wide.
Also, since this is in the slow path, we may not need a bitfield here.

> +                       /** The pasid field in TLP packet */
> +                       uint64_t pasid : 20;
> +                       /** The attributes field in TLP packet */
> +                       uint64_t attr : 3;
> +                       /** The processing hint field in TLP packet */
> +                       uint64_t ph : 2;
> +                       /** The steering tag field in TLP packet */
> +                       uint64_t st : 16;

We don't support a few attributes like pasid, ph and st. Do we need
a capability for these, or should they be ignored? In either case, please
update the doc.

We also support an additional flag for LLC allocation.
This is a hint to the DMA engine that the cache blocks should be allocated
in the LLC (if they were not already).
When the MEM pointer is a destination in a DMA operation, the referenced
cache blocks are allocated into the cache as part of completing the
DMA (when not already present in the LLC).
This is helpful if software has to access the data right after the DMA
completes.

Could you add a bit or flag for the same?



> +               } pcie;
> +       };
> +       uint64_t reserved[2]; /**< Reserved for future fields */
> +};
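
For example, describing the path in the diagram above where the DMA
accesses host A's memory via PF-1 in Core0 would look roughly like this
(a sketch; the field values depend on the actual topology):

struct rte_dmadev_port_parameters port = {
	.port_type = RTE_DMADEV_PORT_OF_PCIE,
	.pcie = {
		.coreid = 0, /* PCIE Core0 */
		.pfid = 1,   /* PF-1 */
		.vfen = 0,   /* initiated by the PF, no VF */
	},
};
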
> +
> +/**
> + * A structure used to configure a virtual DMA channel.
> + */
> +struct rte_dmadev_vchan_conf {
> +       uint8_t direction;
> +       /**< Set of supported transfer directions
> +        * @see RTE_DMA_MEM_TO_MEM
> +        * @see RTE_DMA_MEM_TO_DEV
> +        * @see RTE_DMA_DEV_TO_MEM
> +        * @see RTE_DMA_DEV_TO_DEV

Since we can set only one direction per vchan, should we make it an enum
to make that clear?

> +        */
> +       /** Number of descriptors for the virtual DMA channel */
> +       uint16_t nb_desc;
> +       /** 1) Used to describe the port parameter in the device-to-memory
> +        * transfer scenario.
> +        * 2) Used to describe the source port parameter in the
> +        * device-to-device transfer scenario.
> +        * @see struct rte_dmadev_port_parameters
> +        */
> +       struct rte_dmadev_port_parameters src_port;
> +       /** 1) Used to describe the port parameter in the memory-to-device
> +        * transfer scenario.
> +        * 2) Used to describe the destination port parameter in the
> +        * device-to-device transfer scenario.
> +        * @see struct rte_dmadev_port_parameters
> +        */
> +       struct rte_dmadev_port_parameters dst_port;
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate and set up a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param conf
> + *   The virtual DMA channel configuration structure encapsulated into
> + *   rte_dmadev_vchan_conf object.
> + *
> + * @return
> + *   - >=0: Allocate success, it is the virtual DMA channel id. This value must
> + *          be less than the field 'max_vchans' of struct rte_dmadev_conf
> + *          which was configured by rte_dmadev_configure().
> + *   - <0: Error code returned by the driver virtual channel setup function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +                      const struct rte_dmadev_vchan_conf *conf);
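
Continuing the port example above, a device-to-memory virtual channel
would carry that description in src_port (a sketch; error handling mostly
elided):

struct rte_dmadev_vchan_conf vconf = {
	.direction = RTE_DMA_DEV_TO_MEM,
	.nb_desc = 512,
	.src_port = port, /* the PCIE port parameters described earlier */
};

int vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
if (vchan < 0)
	rte_exit(EXIT_FAILURE, "vchan setup failed\n");
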
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Release a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of the virtual DMA channel which was returned by vchan setup.
> + *
> + * @return
> + *   - =0: Successfully release the virtual DMA channel.
> + *   - <0: Error code returned by the driver virtual channel release function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);

I would like to remove this to align with other device classes in DPDK, and
instead use configure and start again if there is a change in the vchan setup.

> +
> +/**
> + * rte_dmadev_stats - running statistics.
> + */
> +struct rte_dmadev_stats {
> +       /** Count of operations which were successfully enqueued */
> +       uint64_t enqueued_count;
> +       /** Count of operations which were submitted to hardware */
> +       uint64_t submitted_count;
> +       /** Count of operations which failed to complete */
> +       uint64_t completed_fail_count;
> +       /** Count of operations which successfully complete */
> +       uint64_t completed_count;
> +};

Provided comments on stats in another thread.

> +
> +#define RTE_DMADEV_ALL_VCHAN   0xFFFFu

RTE_DMADEV_VCHAN_ALL ??

> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve basic statistics of one or all virtual DMA channels.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
> + * @param[out] stats
> + *   The basic statistics structure encapsulated into rte_dmadev_stats
> + *   object.
> + *
> + * @return
> + *   - =0: Successfully retrieve stats.
> + *   - <0: Failure to retrieve stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
> +                    struct rte_dmadev_stats *stats);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset basic statistics of one or all virtual DMA channels.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
> + *
> + * @return
> + *   - =0: Successfully reset stats.
> + *   - <0: Failure to reset stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
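
A sketch of reading and then clearing the aggregate counters across all
virtual channels (PRIu64 comes from <inttypes.h>):

struct rte_dmadev_stats stats;

if (rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats) == 0)
	printf("submitted=%" PRIu64 " completed=%" PRIu64 " failed=%" PRIu64 "\n",
	       stats.submitted_count, stats.completed_count,
	       stats.completed_fail_count);
rte_dmadev_stats_reset(dev_id, RTE_DMADEV_ALL_VCHAN);
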
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Dump DMA device info.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param f
> + *   The file to write the output to.
> + *
> + * @return
> + *   0 on success. Non-zero otherwise.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger the dmadev self test.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - 0: Selftest successful.
> + *   - -ENOTSUP if the device doesn't support selftest
> + *   - other values < 0 on failure.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_selftest(uint16_t dev_id);
> +
> +/**
> + * rte_dma_sge - can hold scatter DMA operation request entry
> + */
> +struct rte_dma_sge {
> +       rte_iova_t addr;
> +       uint32_t length;
> +};
> +
> +/**
> + * rte_dma_sg - can hold scatter DMA operation request
> + */
> +struct rte_dma_sg {
> +       struct rte_dma_sge *src;
> +       struct rte_dma_sge *dst;
> +       uint16_t nb_src; /**< The number of src entries */
> +       uint16_t nb_dst; /**< The number of dst entries */
> +};
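
For reference, building a two-into-one gather copy with these structures
might look like this (a sketch; the iova variables are placeholders, and
see also the API-shape comment on rte_dmadev_copy_sg() further down):

struct rte_dma_sge srcs[2] = {
	{ .addr = src_iova0, .length = 128 },
	{ .addr = src_iova1, .length = 256 },
};
struct rte_dma_sge dsts[1] = {
	{ .addr = dst_iova, .length = 384 },
};
struct rte_dma_sg sg = {
	.src = srcs, .dst = dsts,
	.nb_src = 2, .nb_dst = 1,
};

rte_dmadev_copy_sg(dev_id, vchan, &sg, RTE_DMA_OP_FLAG_SUBMIT);
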
> +
> +#include "rte_dmadev_core.h"
> +
> +/**
> + *  DMA flags to augment operation preparation.
> + *  Used as the 'flags' parameter of rte_dmadev_copy/fill.
> + */
> +#define RTE_DMA_OP_FLAG_FENCE  (1ull << 0)
> +/**< DMA fence flag
> + * It means the operation with this flag must be processed only after all
> + * previous operations are completed.
> + *
> + * @see rte_dmadev_copy()
> + * @see rte_dmadev_copy_sg()
> + * @see rte_dmadev_fill()
> + */
> +#define RTE_DMA_OP_FLAG_SUBMIT (1ull << 1)
> +/**< DMA submit flag
> + * It means the operation with this flag must issue doorbell to hardware after
> + * enqueued jobs.
> + */
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the virtual DMA channel.
> + *
> + * This queues up a copy operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   Flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> +               uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> +#endif
> +       return (*dev->copy)(dev, vchan, src, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter list copy operation to be performed by hardware,
> + * but does not trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param flags
> + *   Flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
> + *   - <0: Error code returned by the driver copy scatterlist function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
> +                  uint64_t flags)

In order to avoid populating rte_dma_sg on the stack (as this is the
fastpath), I would like to change the API to:

rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan,
                   struct rte_dma_sge *src, struct rte_dma_sge *dst,
                   uint16_t nb_src, uint16_t nb_dst, uint64_t flags)


> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           sg == NULL)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> +#endif
> +       return (*dev->copy_sg)(dev, vchan, sg, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the virtual DMA channel.
> + *
> + * This queues up a fill operation to be performed by hardware, but does not
> + * trigger hardware to begin that operation.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   Flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued fill job.
> + *   - <0: Error code returned by the driver fill function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +               rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +#endif
> +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations.
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill()
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *
> + * @return
> + *   - =0: Successfully trigger hardware.
> + *   - <0: Failure to trigger hardware.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
> +#endif
> +       return (*dev->submit)(dev, vchan);
> +}
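
Putting the flags and the doorbell together: a batched enqueue can either
submit explicitly, or fold the doorbell into the last enqueue (a sketch;
burst and the src, dst and len arrays are placeholders):

uint16_t i;

/* enqueue a burst, then ring the doorbell once */
for (i = 0; i < burst; i++)
	rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len[i], 0);
rte_dmadev_submit(dev_id, vchan);

/* equivalent: set the SUBMIT flag on the final operation only */
for (i = 0; i < burst; i++)
	rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len[i],
			i == burst - 1 ? RTE_DMA_OP_FLAG_SUBMIT : 0);
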
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] has_error
> + *   Indicates if there was a transfer error.
> + *   If not required, NULL can be passed in.
> + *
> + * @return
> + *   The number of operations that successfully completed.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> +                    uint16_t *last_idx, bool *has_error)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +       bool err;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           nb_cpls == 0)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, -ENOTSUP);
> +#endif
> +
> +       /* Ensure the pointer values are non-null to simplify drivers.
> +        * In most cases these should be compile time evaluated, since this is
> +        * an inline function.
> +        * - If NULL is explicitly passed as parameter, then compiler knows the
> +        *   value is NULL
> +        * - If address of local variable is passed as parameter, then compiler
> +        *   can know it's non-NULL.
> +        */
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +       if (has_error == NULL)
> +               has_error = &err;
> +
> +       *has_error = false;
> +       return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> +}
> +
> +/**
> + * DMA transfer status code defines
> + */
> +enum rte_dma_status_code {
> +       /** The operation completed successfully */
> +       RTE_DMA_STATUS_SUCCESSFUL = 0,
> +       /** The operation failed to complete due to an active drop.
> +        * This is mainly used when processing dev_stop, allowing outstanding
> +        * requests to be completed as much as possible.
> +        */
> +       RTE_DMA_STATUS_ACTIVE_DROP,
> +       /** The operation failed to complete due to an invalid source address */
> +       RTE_DMA_STATUS_INVALID_SRC_ADDR,
> +       /** The operation failed to complete due to an invalid destination address */
> +       RTE_DMA_STATUS_INVALID_DST_ADDR,
> +       /** The operation failed to complete due to an invalid length */
> +       RTE_DMA_STATUS_INVALID_LENGTH,
> +       /** The operation failed to complete due to an invalid opcode.
> +        * The DMA descriptor could have multiple formats, which are
> +        * distinguished by the opcode field.
> +        */
> +       RTE_DMA_STATUS_INVALID_OPCODE,
> +       /** The operation failed to complete due to a bus error */
> +       RTE_DMA_STATUS_BUS_ERROR,
> +       /** The operation failed to complete due to data poisoning */
> +       RTE_DMA_STATUS_DATA_POISION,
> +       /** The operation failed to complete due to a descriptor read error */
> +       RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
> +       /** The operation failed to complete due to a device link error.
> +        * Used to indicate a link error in the mem-to-dev/dev-to-mem/
> +        * dev-to-dev transfer scenarios.
> +        */
> +       RTE_DMA_STATUS_DEV_LINK_ERROR,
> +       /** The operation failed to complete due to an unknown reason */
> +       RTE_DMA_STATUS_UNKNOWN,
> +       /** Driver-specific status code offset.
> +        * Start of the status codes that a driver can use to define its own
> +        * error codes.
> +        */
> +       RTE_DMA_STATUS_DRV_SPECIFIC_OFFSET = 0x10000,
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that failed to complete.
> + * NOTE: This API is used when rte_dmadev_completed() has set has_error.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_status
> + *   Indicates the size of status array.
> + * @param[out] status
> + *   The error code of operations that failed to complete.
> + *   Some standard error code are described in 'enum rte_dma_status_code'
> + *   @see rte_dma_status_code
> + * @param[out] last_idx
> + *   The last failed completed operation's index.
> + *
> + * @return
> + *   The number of operations that failed to complete.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_fails(uint16_t dev_id, uint16_t vchan,
> +                          const uint16_t nb_status, uint32_t *status,
> +                          uint16_t *last_idx)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           nb_status == 0 ||
> +           status == NULL ||
> +           last_idx == NULL)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_fails, -ENOTSUP);
> +#endif
> +       return (*dev->completed_fails)(dev, vchan, nb_status, status, last_idx);
> +}
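
The intended interplay of the two completion calls is then roughly (a
sketch; the enqueue side is elided):

uint16_t last_idx, n;
bool has_error = false;

n = rte_dmadev_completed(dev_id, vchan, 32, &last_idx, &has_error);
/* retire the n successfully completed jobs up to last_idx here */

if (has_error) {
	uint32_t status[32];
	uint16_t fail_idx;
	uint16_t nb_fail = rte_dmadev_completed_fails(dev_id, vchan, 32,
						      status, &fail_idx);
	/* inspect status[0..nb_fail-1] against enum rte_dma_status_code */
}
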
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_H_ */
> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> new file mode 100644
> index 0000000..b0b6494
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -0,0 +1,161 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#ifndef _RTE_DMADEV_CORE_H_
> +#define _RTE_DMADEV_CORE_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device internal header.
> + *
> + * This header contains internal data types, that are used by the DMA devices
> + * in order to expose their ops to the class.
> + *
> + * Applications should not use these APIs directly.
> + *
> + */
> +
> +struct rte_dmadev;
> +
> +/** @internal Used to get device information of a device. */
> +typedef int (*dmadev_info_get_t)(const struct rte_dmadev *dev,
> +                                struct rte_dmadev_info *dev_info,
> +                                uint32_t info_sz);
> +
> +/** @internal Used to configure a device. */
> +typedef int (*dmadev_configure_t)(struct rte_dmadev *dev,
> +                                 const struct rte_dmadev_conf *dev_conf);
> +
> +/** @internal Used to start a configured device. */
> +typedef int (*dmadev_start_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to stop a configured device. */
> +typedef int (*dmadev_stop_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to close a configured device. */
> +typedef int (*dmadev_close_t)(struct rte_dmadev *dev);
> +
> +/** @internal Used to allocate and set up a virtual DMA channel. */
> +typedef int (*dmadev_vchan_setup_t)(struct rte_dmadev *dev,
> +                                   const struct rte_dmadev_vchan_conf *conf);
> +
> +/** @internal Used to release a virtual DMA channel. */
> +typedef int (*dmadev_vchan_release_t)(struct rte_dmadev *dev, uint16_t vchan);
> +
> +/** @internal Used to retrieve basic statistics. */
> +typedef int (*dmadev_stats_get_t)(const struct rte_dmadev *dev, uint16_t vchan,
> +                                 struct rte_dmadev_stats *stats,
> +                                 uint32_t stats_sz);
> +
> +/** @internal Used to reset basic statistics. */
> +typedef int (*dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
> +
> +/** @internal Used to dump internal information. */
> +typedef int (*dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
> +
> +/** @internal Used to start dmadev selftest. */
> +typedef int (*dmadev_selftest_t)(uint16_t dev_id);
> +
> +/** @internal Used to enqueue a copy operation. */
> +typedef int (*dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                            rte_iova_t src, rte_iova_t dst,
> +                            uint32_t length, uint64_t flags);
> +
> +/** @internal Used to enqueue a scatter list copy operation. */
> +typedef int (*dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                               const struct rte_dma_sg *sg, uint64_t flags);
> +
> +/** @internal Used to enqueue a fill operation. */
> +typedef int (*dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                            uint64_t pattern, rte_iova_t dst,
> +                            uint32_t length, uint64_t flags);
> +
> +/** @internal Used to trigger hardware to begin working. */
> +typedef int (*dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
> +
> +/** @internal Used to return the number of successfully completed operations. */
> +typedef uint16_t (*dmadev_completed_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                                      const uint16_t nb_cpls,
> +                                      uint16_t *last_idx, bool *has_error);
> +
> +/** @internal Used to return the number of operations that failed to complete. */
> +typedef uint16_t (*dmadev_completed_fails_t)(struct rte_dmadev *dev,
> +                       uint16_t vchan, const uint16_t nb_status,
> +                       uint32_t *status, uint16_t *last_idx);
> +
> +/**
> + * Possible states of a DMA device.
> + */
> +enum rte_dmadev_state {
> +       /** Device is unused before being probed. */
> +       RTE_DMADEV_UNUSED = 0,
> +       /** Device is attached when allocated in probing. */
> +       RTE_DMADEV_ATTACHED,
> +};
> +
> +/**
> + * DMA device operations function pointer table
> + */
> +struct rte_dmadev_ops {
> +       dmadev_info_get_t dev_info_get;
> +       dmadev_configure_t dev_configure;
> +       dmadev_start_t dev_start;
> +       dmadev_stop_t dev_stop;
> +       dmadev_close_t dev_close;
> +       dmadev_vchan_setup_t vchan_setup;
> +       dmadev_vchan_release_t vchan_release;
> +       dmadev_stats_get_t stats_get;
> +       dmadev_stats_reset_t stats_reset;
> +       dmadev_dump_t dev_dump;
> +       dmadev_selftest_t dev_selftest;
> +};
> +
> +/**
> + * @internal
> + * The data part, with no function pointers, associated with each DMA device.
> + *
> + * This structure is safe to place in shared memory to be common among different
> + * processes in a multi-process configuration.
> + */
> +struct rte_dmadev_data {
> +       void *dev_private; /**< PMD-specific private data. */
> +       uint16_t dev_id; /**< Device [external] identifier. */
> +       char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
> +       struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
> +       uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
> +       uint64_t reserved[2]; /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +/**
> + * @internal
> + * The generic data structure associated with each DMA device.
> + *
> + * The dataplane APIs are located at the beginning of the structure, along
> + * with the pointer to where all the data elements for the particular device
> + * are stored in shared memory. This split scheme allows the function pointer
> + * and driver data to be per-process, while the actual configuration data for
> + * the device is shared.
> + */
> +struct rte_dmadev {
> +       dmadev_copy_t copy;
> +       dmadev_copy_sg_t copy_sg;
> +       dmadev_fill_t fill;
> +       dmadev_submit_t submit;
> +       dmadev_completed_t completed;
> +       dmadev_completed_fails_t completed_fails;
> +       void *reserved_ptr; /**< Reserved for future IO function */
> +       struct rte_dmadev_data *data; /**< Pointer to device data. */
> +
> +       const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
> +       /** Device info which supplied during device initialization. */
> +       struct rte_device *device;
> +       enum rte_dmadev_state state; /**< Flag indicating the device state */
> +       uint64_t reserved[2]; /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +extern struct rte_dmadev rte_dmadevices[];
> +
> +#endif /* _RTE_DMADEV_CORE_H_ */
> diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
> new file mode 100644
> index 0000000..45141f9
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_pmd.h
> @@ -0,0 +1,72 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_PMD_H_
> +#define _RTE_DMADEV_PMD_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device PMD APIs
> + *
> + * Driver facing APIs for a DMA device. These are not to be called directly by
> + * any application.
> + */
> +
> +#include "rte_dmadev.h"
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * @internal
> + * Allocates a new dmadev slot for a DMA device and returns the pointer
> + * to that slot for the driver to use.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   A pointer to the DMA device slot in case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name);
> +
> +/**
> + * @internal
> + * Release the specified dmadev.
> + *
> + * @param dev
> + *   Device to be released.
> + *
> + * @return
> + *   - 0 on success, negative on error
> + */
> +__rte_internal
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev);
> +
> +/**
> + * @internal
> + * Return the DMA device based on the device name.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   A pointer to the DMA device slot in case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dmadev *
> +rte_dmadev_get_device_by_name(const char *name);
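
For driver authors, probe and remove would pair these helpers roughly as
follows (a skeleton; my_dmadev_ops and my_priv are placeholders for the
PMD's own ops table and private data):

static int
my_dma_probe(struct rte_device *rte_dev)
{
	struct rte_dmadev *dev = rte_dmadev_pmd_allocate(rte_dev->name);

	if (dev == NULL)
		return -ENOMEM;
	dev->dev_ops = &my_dmadev_ops;    /* struct rte_dmadev_ops */
	dev->data->dev_private = my_priv; /* PMD-specific state */
	dev->device = rte_dev;
	return 0;
}

static int
my_dma_remove(struct rte_device *rte_dev)
{
	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(rte_dev->name);

	if (dev == NULL)
		return -ENODEV;
	return rte_dmadev_pmd_release(dev);
}
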
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_PMD_H_ */
> diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
> new file mode 100644
> index 0000000..2af78e4
> --- /dev/null
> +++ b/lib/dmadev/version.map
> @@ -0,0 +1,37 @@
> +EXPERIMENTAL {
> +       global:
> +
> +       rte_dmadev_close;
> +       rte_dmadev_completed;
> +       rte_dmadev_completed_fails;
> +       rte_dmadev_configure;
> +       rte_dmadev_copy;
> +       rte_dmadev_copy_sg;
> +       rte_dmadev_count;
> +       rte_dmadev_dump;
> +       rte_dmadev_fill;
> +       rte_dmadev_info_get;
> +       rte_dmadev_is_valid_dev;
> +       rte_dmadev_selftest;
> +       rte_dmadev_start;
> +       rte_dmadev_stats_get;
> +       rte_dmadev_stats_reset;
> +       rte_dmadev_stop;
> +       rte_dmadev_submit;
> +       rte_dmadev_vchan_release;
> +       rte_dmadev_vchan_setup;
> +
> +       local: *;
> +};
> +
> +INTERNAL {
> +        global:
> +
> +       rte_dmadevices;
> +       rte_dmadev_get_device_by_name;
> +       rte_dmadev_pmd_allocate;
> +       rte_dmadev_pmd_release;
> +
> +       local: *;
> +};
> +
> diff --git a/lib/meson.build b/lib/meson.build
> index 1673ca4..68d239f 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -60,6 +60,7 @@ libraries = [
>          'bpf',
>          'graph',
>          'node',
> +        'dmadev',
>  ]
>
>  if is_windows
> --
> 2.8.1
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15  6:44       ` Jerin Jacob
@ 2021-07-15  8:25         ` Bruce Richardson
  2021-07-15  9:49           ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-15  8:25 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 15, 2021 at 12:14:05PM +0530, Jerin Jacob wrote:
> On Tue, Jul 13, 2021 at 7:08 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > On Tue, Jul 13, 2021 at 09:06:39PM +0800, fengchengwen wrote:
> 
> > > 4.  COMMENT:> +       uint64_t reserved[4]; /**< Reserved for future
> > > fields */
> > > > +};
> > > Please add the capability for each counter in info structure as one
> > > > device may not support all the counters.
> > >
> > > REPLY: This is a statistics function. If this function is not supported,
> > > > then there is no need to implement the stats ops function. Also, the
> > > > unimplemented ones could be set to zero.
> > >
> > +1
> > The stats functions should be a minimum set that is supported by all
> > drivers. Each of these stats can be easily tracked by software if HW
> > support for it is not available, so I agree that we should not have each
> > stat as a capability.
> 
> In our current HW, submitted_count and completed_count offloaded to HW.
> In addition to that, we have a provision for getting stats for bytes
> copied.( We can make it as xstat, if other drivers won't support)
> 
> our plan is to use enqueued_count and completed_fail_count in SW under
> condition compilation flags or another scheme as it is in fastpath.
> 
> If we are not planning to add capability, IMO, we need to update the
> documentation,
> like unimplemented counters will return zero. But there is the
> question of how to differentiate between
> unimplemented vs genuine zero value. IMO, we can update the doc for
> this case as well or
> add capability.
> 

While we could add capabilities for stats, I'd really rather not. Let's
just get an agreed upon minimum set. Seems like submitted and completed are
fine for all, which just leaves two to discuss for an in/out decision.

Jerin, can fail count be kept without conditional compilation, perhaps,
because it should not be touched in the fastpath but just on error legs?

For enqueued_count, in our driver I was just going to track the difference
between last doorbell and this one - which we would be tracking anyway, or
could compute very easily by saving last doorbell counter -  and add that to
the submitted count when stats are requested. That would again ensure no
fastpath impact bar perhaps storing one additional variable (old DB) per
burst. If that is felt too cumbersome, I think we can drop it, but let's at
least keep error count.
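
For illustration, the doorbell-delta bookkeeping I have in mind is as
small as this (a sketch, not part of the patch; cur_db would be the
ring_idx at doorbell time):

/* per-vchan driver state */
uint16_t last_db;   /* ring_idx at the previous doorbell */
uint64_t submitted; /* running stat */

/* on each doorbell, fold in the delta (uint16_t arithmetic wraps) */
submitted += (uint16_t)(cur_db - last_db);
last_db = cur_db;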

Thanks,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-14 12:22   ` Nipun Gupta
@ 2021-07-15  8:29     ` fengchengwen
  2021-07-15 11:16       ` Nipun Gupta
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-15  8:29 UTC (permalink / raw)
  To: Nipun Gupta, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, Hemant Agrawal, maxime.coquelin, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev,
	Gagandeep Singh

On 2021/7/14 20:22, Nipun Gupta wrote:
> <snip>
> 
>> +/**
>> + * A structure used to configure a virtual DMA channel.
>> + */
>> +struct rte_dmadev_vchan_conf {
>> +	uint8_t direction;
>> +	/**< Set of supported transfer directions
>> +	 * @see RTE_DMA_MEM_TO_MEM
>> +	 * @see RTE_DMA_MEM_TO_DEV
>> +	 * @see RTE_DMA_DEV_TO_MEM
>> +	 * @see RTE_DMA_DEV_TO_DEV
>> +	 */
>> +	/** Number of descriptor for the virtual DMA channel */
>> +	uint16_t nb_desc;
>> +	/** 1) Used to describe the port parameter in the device-to-memory
>> +	 * transfer scenario.
>> +	 * 2) Used to describe the source port parameter in the
>> +	 * device-to-device transfer scenario.
>> +	 * @see struct rte_dmadev_port_parameters
>> +	 */
> 
> There should also be a configuration to support no response (per virtual
> channel), and if that is enabled, the user will not be required to call the
> 'rte_dmadev_completed' API. This should also be part of the capabilities.

Do you mean some kind of silent mode, where the application only needs to
submit requests to the hardware?

Could you briefly describe the working principles and application scenarios
of such a device?

> 
>> +	struct rte_dmadev_port_parameters src_port;
>> +	/** 1) Used to describe the port parameter in the memory-to-device
>> +	 * transfer scenario.
>> +	 * 2) Used to describe the destination port parameter in the
>> +	 * device-to-device transfer scenario.
>> +	 * @see struct rte_dmadev_port_parameters
>> +	 */
>> +	struct rte_dmadev_port_parameters dst_port;
>> +};
>> +
> 
> <snip>
> 
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
>> + *
>> + * This queues up a scatter list copy operation to be performed by hardware,
>> + * but does not trigger hardware to begin that operation.
> 
> This would need update with the submit flag.
> The statement should be true only when the flag is set?
> Similar comment I see on 'rte_dmadev_copy_sg' and 'rte_dma_fill' APIs

OK, will fix in V4

> 
>> + *
>> + * @param dev_id
>> + *   The identifier of the device.
>> + * @param vchan
>> + *   The identifier of virtual DMA channel.
>> + * @param sg
>> + *   The pointer of scatterlist.
>> + * @param flags
>> + *   An flags for this operation.
>> + *   @see RTE_DMA_OP_FLAG_*
>> + *
>> + * @return
>> + *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
>> + *   - <0: Error code returned by the driver copy scatterlist function.
>> + */
>> +__rte_experimental
>> +static inline int
>> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg
>> *sg,
>> +		   uint64_t flags)
>> +{
>> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +#ifdef RTE_DMADEV_DEBUG
>> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
>> +	    vchan >= dev->data->dev_conf.max_vchans ||
>> +	    sg == NULL)
>> +		return -EINVAL;
>> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
>> +#endif
>> +	return (*dev->copy_sg)(dev, vchan, sg, flags);
>> +}
>> +
> 
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15  7:10   ` Jerin Jacob
@ 2021-07-15  9:03     ` Bruce Richardson
  2021-07-15  9:30       ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-15  9:03 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 15, 2021 at 12:40:01PM +0530, Jerin Jacob wrote:
> 
> On Tue, Jul 13, 2021 at 6:01 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> >
> > This patch introduce 'dmadevice' which is a generic type of DMA
> > device.
> >
> > The APIs of dmadev library exposes some generic operations which can
> > enable configuration and I/O with the DMA devices.
> >
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> 
> Thanks for v3. It seems like all major items are covered. Some more
> comments below inline.
>
> I would suggest splitting the v4 patch as follows (so that we can review
> and ack each patch):
> 1) Only the public header file with Doxygen inclusion (there are a lot of
> Doxygen syntax issues in the patch).
> 2) One or more patches for the implementation.
> 

One additional follow-up comment on flags below.

/Bruce

> 
> > diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> > new file mode 100644
> > index 0000000..f6cc4e5
<snip>
> > +       enum rte_dmadev_port_type port_type;
> Missing doxygen comment for this.
> > +       union {
> > +               /** For PCIE port
> > +                *
> > +                * The following model show SoC's PCIE module connects to
> > +                * multiple PCIE hosts and multiple endpoints. The PCIE module
> > +                * has an integrate DMA controller.
> > +                * If the DMA wants to access the memory of host A, it can be
> > +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> > +                *
<snip>
> +                       /** The pasid field in TLP packet */
> > +                       uint64_t pasid : 20;
> > +                       /** The attributes field in TLP packet */
> > +                       uint64_t attr : 3;
> > +                       /** The processing hint field in TLP packet */
> > +                       uint64_t ph : 2;
> > +                       /** The steering tag field in TLP packet */
> > +                       uint64_t st : 16;
> 
> We don't support a few attributes like pasid, ph and st. Do we need
> a capability for these, or should they be ignored? In either case, please
> update the doc.
> 
> We also support an additional flag for LLC allocation.
> This is a hint to the DMA engine that the cache blocks should be allocated
> in the LLC (if they were not already).
> When the MEM pointer is a destination in a DMA operation, the referenced
> cache blocks are allocated into the cache as part of completing the
> DMA (when not already present in the LLC).
> This is helpful if software has to access the data right after the DMA
> completes.
> 
> Could you add a bit or flag for the same?
> 

I wonder if this is the best location for such a flag for LLC vs memory
writes. It would also apply to memory-to-memory transactions, not just for
those done to PCI devices. As well as that, I think any flag should default
to "on" rather than "off" since writing to cache rather than DRAM is
generally the desired behaviour, I would think. Should it be a
per-operation flag, rather than per context?

<snip>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15  9:03     ` Bruce Richardson
@ 2021-07-15  9:30       ` Jerin Jacob
  2021-07-15 10:03         ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-15  9:30 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 15, 2021 at 2:33 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Thu, Jul 15, 2021 at 12:40:01PM +0530, Jerin Jacob wrote:
> >
> > On Tue, Jul 13, 2021 at 6:01 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > >
> > > This patch introduce 'dmadevice' which is a generic type of DMA
> > > device.
> > >
> > > The APIs of dmadev library exposes some generic operations which can
> > > enable configuration and I/O with the DMA devices.
> > >
> > > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> >
> > Thanks for v3. It seems like all major items are covered. Some more
> > comments below inline.
> >
> > I would suggest splitting the v4 patch as follows (so that we can review
> > and ack each patch):
> > 1) Only the public header file with Doxygen inclusion (there are a lot of
> > Doxygen syntax issues in the patch).
> > 2) One or more patches for the implementation.
> >
>
> One additional follow-up comment on flags below.
>
> /Bruce
>
> >
> > > diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> > > new file mode 100644
> > > index 0000000..f6cc4e5
> <snip>
> > > +       enum rte_dmadev_port_type port_type;
> > Missing doxygen comment for this.
> > > +       union {
> > > +               /** For PCIE port
> > > +                *
> > > +                * The following model show SoC's PCIE module connects to
> > > +                * multiple PCIE hosts and multiple endpoints. The PCIE module
> > > +                * has an integrate DMA controller.
> > > +                * If the DMA wants to access the memory of host A, it can be
> > > +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> > > +                *
> <snip>
> > +                       /** The pasid field in TLP packet */
> > > +                       uint64_t pasid : 20;
> > > +                       /** The attributes field in TLP packet */
> > > +                       uint64_t attr : 3;
> > > +                       /** The processing hint field in TLP packet */
> > > +                       uint64_t ph : 2;
> > > +                       /** The steering tag field in TLP packet */
> > > +                       uint64_t st : 16;
> >
> > We don't support a few attributes like pasid, ph and st. Do we need
> > a capability for these, or should they be ignored? In either case, please
> > update the doc.
> >
> > We also support an additional flag for LLC allocation.
> > This is a hint to the DMA engine that the cache blocks should be allocated
> > in the LLC (if they were not already).
> > When the MEM pointer is a destination in a DMA operation, the referenced
> > cache blocks are allocated into the cache as part of completing the
> > DMA (when not already present in the LLC).
> > This is helpful if software has to access the data right after the DMA
> > completes.
> >
> > Could you add a bit or flag for the same?
> >
>
> I wonder if this is the best location for such a flag for LLC vs memory
> writes. It would also apply to memory-to-memory transactions, not just for
> those done to PCI devices.

Ack, it can be used for MEM-to-MEM.

>  As well as that, I think any flag should default
> to "on" rather than "off" since writing to cache rather than DRAM is
> generally the desired behaviour, I would think.

I think keeping "allocate in LLC" on for all transfers will not be good,
as large transfers pollute the LLC and the dataplane may not touch the
complete data, only the header. Also, for device copies, allocating into
the LLC has an additional cost, unlike MEM-to-MEM.

So IMO, it is better to add a flag that allows allocating into the LLC as a HINT.

> Should it be a per-operation flag, rather than per context?

Yes. better it be per-operation as it is the hint.


>
> <snip>
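
To make the hint concrete, here is a minimal application-side sketch,
assuming a hypothetical RTE_DMA_OP_FLAG_LLC bit alongside the existing
RTE_DMA_OP_FLAG_SUBMIT (the flag name and bit position are illustrative
only, not part of v3):

#include <rte_dmadev.h>

/* Assumed flag, for illustration only; not defined in v3. */
#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)

static int
copy_pkt(uint16_t dev_id, uint16_t vchan, rte_iova_t src,
	 rte_iova_t dst, uint32_t hdr_len, uint32_t total_len)
{
	/* Header: the dataplane reads it right after completion,
	 * so hint the engine to allocate these blocks in the LLC. */
	int ret = rte_dmadev_copy(dev_id, vchan, src, dst, hdr_len,
				  RTE_DMA_OP_FLAG_LLC);
	if (ret < 0)
		return ret;
	/* Payload: no hint, a large transfer would pollute the LLC. */
	return rte_dmadev_copy(dev_id, vchan, src + hdr_len,
			       dst + hdr_len, total_len - hdr_len,
			       RTE_DMA_OP_FLAG_SUBMIT);
}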

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15  8:25         ` Bruce Richardson
@ 2021-07-15  9:49           ` Jerin Jacob
  2021-07-15 10:00             ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-15  9:49 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 15, 2021 at 1:55 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Thu, Jul 15, 2021 at 12:14:05PM +0530, Jerin Jacob wrote:
> > On Tue, Jul 13, 2021 at 7:08 PM Bruce Richardson
> > <bruce.richardson@intel.com> wrote:
> > >
> > > On Tue, Jul 13, 2021 at 09:06:39PM +0800, fengchengwen wrote:
> >
> > > > 4.  COMMENT:> +       uint64_t reserved[4]; /**< Reserved for future
> > > > fields */
> > > > > +};
> > > > Please add a capability for each counter in the info structure, as a
> > > > device may not support all the counters.
> > > >
> > > > REPLY: This is a statistics function. If this function is not supported,
> > > > then there is no need to implement the stats ops function. The
> > > > unimplemented counters could also be set to zero.
> > > >
> > > +1
> > > The stats functions should be a minimum set that is supported by all
> > > drivers. Each of these stats can be easily tracked by software if HW
> > > support for it is not available, so I agree that we should not have each
> > > stat as a capability.
> >
> > In our current HW, submitted_count and completed_count are offloaded to HW.
> > In addition to that, we have a provision for getting stats for bytes
> > copied. (We can make it an xstat if other drivers won't support it.)
> >
> > Our plan is to use enqueued_count and completed_fail_count in SW, under
> > conditional compilation flags or another scheme, as they are in the fastpath.
> >
> > If we are not planning to add capabilities, IMO we need to update the
> > documentation to state that unimplemented counters will return zero. But
> > there is the question of how to differentiate an unimplemented counter
> > from a genuine zero value. IMO, we can update the doc for this case as
> > well, or add a capability.
> >
>
> While we could add capabilities for stats, I'd really rather not. Let's
> just get an agreed-upon minimum set. Seems like submitted and completed are
> fine for all, which just leaves two to discuss for an in/out decision.
>
> Jerin, can the fail count be kept without conditional compilation, perhaps,
> because it should not be touched in the fastpath but just on error legs?

Agree.

>
> For enqueued_count, in our driver I was just going to track the difference
> between the last doorbell and this one - which we would be tracking anyway,
> or could compute very easily by saving the last doorbell counter - and add
> that to the submitted count when stats are requested. That would again
> ensure no fastpath impact, bar perhaps storing one additional variable
> (old DB) per burst. If that is felt to be too cumbersome, I think we can
> drop it, but let's at least keep the error count.

+1 to keep submitted_count, completed_count, and fail count.

The enqueue count can be moved to xstats if it is supported by drivers.
Also, since drivers return a 0-2^16 monotonically incrementing counter,
applications can track the enqueue count themselves if needed, without
driver support.



>
> Thanks,
> /Bruce
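
A rough sketch of the doorbell-delta scheme described above (all names
are illustrative, not part of the patch):

#include <stdint.h>

struct drv_stats {
	uint64_t submitted; /* folded in at each doorbell */
	uint16_t last_db;   /* ring index at the previous doorbell */
};

/* Called on each doorbell; cur_idx is the ring index of the next free
 * slot. The 16-bit subtraction is wrap-safe across UINT16_MAX. */
static void
drv_doorbell(struct drv_stats *s, uint16_t cur_idx)
{
	s->submitted += (uint16_t)(cur_idx - s->last_db);
	s->last_db = cur_idx;
	/* write cur_idx to the hardware doorbell register here */
}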

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15  9:49           ` Jerin Jacob
@ 2021-07-15 10:00             ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-15 10:00 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 15, 2021 at 03:19:55PM +0530, Jerin Jacob wrote:
> On Thu, Jul 15, 2021 at 1:55 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > On Thu, Jul 15, 2021 at 12:14:05PM +0530, Jerin Jacob wrote:
> > > On Tue, Jul 13, 2021 at 7:08 PM Bruce Richardson
> > > <bruce.richardson@intel.com> wrote:
> > > >
> > > > On Tue, Jul 13, 2021 at 09:06:39PM +0800, fengchengwen wrote:
> > >
> > > > > 4.  COMMENT:> +       uint64_t reserved[4]; /**< Reserved for future
> > > > > fields */
> > > > > > +};
> > > > > Please add a capability for each counter in the info structure, as a
> > > > > device may not support all the counters.
> > > > >
> > > > > REPLY: This is a statistics function. If this function is not supported,
> > > > > then there is no need to implement the stats ops function. The
> > > > > unimplemented counters could also be set to zero.
> > > > >
> > > > +1
> > > > The stats functions should be a minimum set that is supported by all
> > > > drivers. Each of these stats can be easily tracked by software if HW
> > > > support for it is not available, so I agree that we should not have each
> > > > stat as a capability.
> > >
> > > In our current HW, submitted_count and completed_count are offloaded to HW.
> > > In addition to that, we have a provision for getting stats for bytes
> > > copied. (We can make it an xstat if other drivers won't support it.)
> > >
> > > Our plan is to use enqueued_count and completed_fail_count in SW, under
> > > conditional compilation flags or another scheme, as they are in the fastpath.
> > >
> > > If we are not planning to add capabilities, IMO we need to update the
> > > documentation to state that unimplemented counters will return zero. But
> > > there is the question of how to differentiate an unimplemented counter
> > > from a genuine zero value. IMO, we can update the doc for this case as
> > > well, or add a capability.
> > >
> >
> > While we could add capabilities for stats, I'd really rather not. Let's
> > just get an agreed-upon minimum set. Seems like submitted and completed are
> > fine for all, which just leaves two to discuss for an in/out decision.
> >
> > Jerin, can the fail count be kept without conditional compilation, perhaps,
> > because it should not be touched in the fastpath but just on error legs?
> 
> Agree.
> 
> >
> > For enqueued_count, in our driver I was just going to track the difference
> > between the last doorbell and this one - which we would be tracking anyway,
> > or could compute very easily by saving the last doorbell counter - and add
> > that to the submitted count when stats are requested. That would again
> > ensure no fastpath impact, bar perhaps storing one additional variable
> > (old DB) per burst. If that is felt to be too cumbersome, I think we can
> > drop it, but let's at least keep the error count.
> 
> +1 to keep submitted_count, completed_count, and fail count.
>
> The enqueue count can be moved to xstats if it is supported by drivers.
> Also, since drivers return a 0-2^16 monotonically incrementing counter,
> applications can track the enqueue count themselves if needed, without
> driver support.
>
Agreed. Let's just stick to the 3 basic stats.

/Bruce 
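
For reference, the agreed minimum set reduces the stats structure to
roughly the following (a sketch; the exact v4 field names may differ):

struct rte_dmadev_stats {
	uint64_t submitted_count;      /* jobs submitted to hardware */
	uint64_t completed_count;      /* jobs reported as completed */
	uint64_t completed_fail_count; /* completed jobs that failed */
};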

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15  9:30       ` Jerin Jacob
@ 2021-07-15 10:03         ` Bruce Richardson
  2021-07-15 10:05           ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-15 10:03 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 15, 2021 at 03:00:01PM +0530, Jerin Jacob wrote:
> On Thu, Jul 15, 2021 at 2:33 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > On Thu, Jul 15, 2021 at 12:40:01PM +0530, Jerin Jacob wrote:
> > >
> > > On Tue, Jul 13, 2021 at 6:01 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > >
> > > > This patch introduces 'dmadevice' which is a generic type of DMA
> > > > device.
> > > >
> > > > The APIs of the dmadev library expose some generic operations which can
> > > > enable configuration and I/O with the DMA devices.
> > > >
> > > > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > >
> > > Thanks for v3. Seems like all major items are covered. Some more
> > > comments below inline.
> > >
> > > I would suggest v4 to split the patch like this (so that we can review
> > > and ack each patch):
> > > 1) Only the public header file with Doxygen inclusion (there are a lot
> > > of Doxygen syntax issues in the patch)
> > > 2) 1 or more patches for implementation.
> > >
> >
> > One additional follow-up comment on flags below.
> >
> > /Bruce
> >
> > >
> > > > diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> > > > new file mode 100644
> > > > index 0000000..f6cc4e5
> > <snip>
> > > > +       enum rte_dmadev_port_type port_type;
> > > missing doxygen comment for this.
> > > > +       union {
> > > > +               /** For PCIE port
> > > > +                *
> > > > +                * The following model shows how the SoC's PCIE module connects to
> > > > +                * multiple PCIE hosts and multiple endpoints. The PCIE module
> > > > +                * has an integrated DMA controller.
> > > > +                * If the DMA wants to access the memory of host A, it can be
> > > > +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> > > > +                *
> > <snip>
> > > +                       /** The pasid field in TLP packet */
> > > > +                       uint64_t pasid : 20;
> > > > +                       /** The attributes field in TLP packet */
> > > > +                       uint64_t attr : 3;
> > > > +                       /** The processing hint field in TLP packet */
> > > > +                       uint64_t ph : 2;
> > > > +                       /** The steering tag field in TLP packet */
> > > > +                       uint64_t st : 16;
> > >
> > > We don't support a few attributes like pasid, ph, st. Do we need
> > > a capability for these, or should we ignore them? In either case, please update the doc.
> > >
> > > We would also like an additional flag for LLC allocation.
> > > This is a hint to the DMA engine that the cache blocks should be allocated
> > > in the LLC (if they were not already).
> > > When the MEM pointer is a destination in a DMA operation, the referenced
> > > cache blocks are allocated into the cache as part of completing the
> > > DMA (when not already present in the LLC).
> > > This is helpful if software has to access the data right after the DMA completes.
> > >
> > > Could you add a bit or flag for the same?
> > >
> >
> > I wonder if this is the best location for such a flag for LLC vs memory
> > writes. It would also apply to memory-to-memory transactions, not just for
> > those done to PCI devices.
> 
> Ack. it can be used for MEM to MEM
> 
> >  As well as that, I think any flag should default
> > to "on" rather than "off" since writing to cache rather than DRAM is
> > generally the desired behaviour, I would think.
> 
> I think keeping "allocate in LLC" on for all transfers will not be good,
> as large transfers pollute the LLC and the dataplane may not touch the
> complete data, only the header. Also, for device copies, allocating in
> the LLC has an additional cost, unlike MEM-to-MEM.
>
> So IMO, it is better to add a flag that allows allocating into the LLC as a HINT.
>
> > Should it be a per-operation flag, rather than per context?
>
> Yes, better that it be per-operation since it is a hint.
> 
Ok. Let's define a new per-op flag for LLC allocation, and keep the
default (without the flag) as no-alloc.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15 10:03         ` Bruce Richardson
@ 2021-07-15 10:05           ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-15 10:05 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Chengwen Feng, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 15, 2021 at 11:03:08AM +0100, Bruce Richardson wrote:
> On Thu, Jul 15, 2021 at 03:00:01PM +0530, Jerin Jacob wrote:
> > On Thu, Jul 15, 2021 at 2:33 PM Bruce Richardson
> > <bruce.richardson@intel.com> wrote:
> > >
> > > On Thu, Jul 15, 2021 at 12:40:01PM +0530, Jerin Jacob wrote:
> > > >
> > > > On Tue, Jul 13, 2021 at 6:01 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
> > > > >
> > > > > This patch introduces 'dmadevice' which is a generic type of DMA
> > > > > device.
> > > > >
> > > > > The APIs of the dmadev library expose some generic operations which can
> > > > > enable configuration and I/O with the DMA devices.
> > > > >
> > > > > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > > >
> > > > Thanks for v3. Seems like all major items are covered. Some more
> > > > comments below inline.
> > > >
> > > > I would suggest v4 to split the patch like this (so that we can review
> > > > and ack each patch):
> > > > 1) Only the public header file with Doxygen inclusion (there are a lot
> > > > of Doxygen syntax issues in the patch)
> > > > 2) 1 or more patches for implementation.
> > > >
> > >
> > > One additional follow-up comment on flags below.
> > >
> > > /Bruce
> > >
> > > >
> > > > > diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> > > > > new file mode 100644
> > > > > index 0000000..f6cc4e5
> > > <snip>
> > > > > +       enum rte_dmadev_port_type port_type;
> > > > missing doxygen comment for this.
> > > > > +       union {
> > > > > +               /** For PCIE port
> > > > > +                *
> > > > > +                * The following model shows how the SoC's PCIE module connects to
> > > > > +                * multiple PCIE hosts and multiple endpoints. The PCIE module
> > > > > +                * has an integrated DMA controller.
> > > > > +                * If the DMA wants to access the memory of host A, it can be
> > > > > +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> > > > > +                *
> > > <snip>
> > > > +                       /** The pasid field in TLP packet */
> > > > > +                       uint64_t pasid : 20;
> > > > > +                       /** The attributes field in TLP packet */
> > > > > +                       uint64_t attr : 3;
> > > > > +                       /** The processing hint field in TLP packet */
> > > > > +                       uint64_t ph : 2;
> > > > > +                       /** The steering tag field in TLP packet */
> > > > > +                       uint64_t st : 16;
> > > >
> > > > We don't support a few attributes like pasid, ph, st. Do we need
> > > > a capability for these, or should we ignore them? In either case, please update the doc.
> > > >
> > > > We would also like an additional flag for LLC allocation.
> > > > This is a hint to the DMA engine that the cache blocks should be allocated
> > > > in the LLC (if they were not already).
> > > > When the MEM pointer is a destination in a DMA operation, the referenced
> > > > cache blocks are allocated into the cache as part of completing the
> > > > DMA (when not already present in the LLC).
> > > > This is helpful if software has to access the data right after the DMA completes.
> > > >
> > > > Could you add a bit or flag for the same?
> > > >
> > >
> > > I wonder if this is the best location for such a flag for LLC vs memory
> > > writes. It would also apply to memory-to-memory transactions, not just for
> > > those done to PCI devices.
> > 
> > Ack. it can be used for MEM to MEM
> > 
> > >  As well as that, I think any flag should default
> > > to "on" rather than "off" since writing to cache rather than DRAM is
> > > generally the desired behaviour, I would think.
> > 
> > I think keeping "allocate in LLC" on for all transfers will not be good,
> > as large transfers pollute the LLC and the dataplane may not touch the
> > complete data, only the header. Also, for device copies, allocating in
> > the LLC has an additional cost, unlike MEM-to-MEM.
> >
> > So IMO, it is better to add a flag that allows allocating into the LLC as a HINT.
> >
> > > Should it be a per-operation flag, rather than per context?
> >
> > Yes, better that it be per-operation since it is a hint.
> > 
> Ok. Let's define a new per-op flag for LLC allocation, and keep the
> default (without the flag) as no-alloc.

[Apologies for self-reply]

Let's also be clear in the documentation for the flag that this is a HINT,
and that drivers may not follow it. That way we don't need to add a
capability flag for it, or to return an error from a function which doesn't
support it, etc.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15  8:29     ` fengchengwen
@ 2021-07-15 11:16       ` Nipun Gupta
  2021-07-15 12:11         ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Nipun Gupta @ 2021-07-15 11:16 UTC (permalink / raw)
  To: fengchengwen, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, Hemant Agrawal, maxime.coquelin, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev,
	Gagandeep Singh



> -----Original Message-----
> From: fengchengwen <fengchengwen@huawei.com>
> Sent: Thursday, July 15, 2021 1:59 PM
> To: Nipun Gupta <nipun.gupta@nxp.com>; thomas@monjalon.net;
> ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
> jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
> Cc: dev@dpdk.org; mb@smartsharesystems.com; Hemant Agrawal
> <hemant.agrawal@nxp.com>; maxime.coquelin@redhat.com;
> honnappa.nagarahalli@arm.com; david.marchand@redhat.com;
> sburla@marvell.com; pkapoor@marvell.com; konstantin.ananyev@intel.com;
> Gagandeep Singh <G.Singh@nxp.com>
> Subject: Re: [PATCH v3] dmadev: introduce DMA device library
> 
> On 2021/7/14 20:22, Nipun Gupta wrote:
> > <snip>
> >
> >> +/**
> >> + * A structure used to configure a virtual DMA channel.
> >> + */
> >> +struct rte_dmadev_vchan_conf {
> >> +	uint8_t direction;
> >> +	/**< Set of supported transfer directions
> >> +	 * @see RTE_DMA_MEM_TO_MEM
> >> +	 * @see RTE_DMA_MEM_TO_DEV
> >> +	 * @see RTE_DMA_DEV_TO_MEM
> >> +	 * @see RTE_DMA_DEV_TO_DEV
> >> +	 */
> >> +	/** Number of descriptors for the virtual DMA channel */
> >> +	uint16_t nb_desc;
> >> +	/** 1) Used to describe the port parameter in the device-to-memory
> >> +	 * transfer scenario.
> >> +	 * 2) Used to describe the source port parameter in the
> >> +	 * device-to-device transfer scenario.
> >> +	 * @see struct rte_dmadev_port_parameters
> >> +	 */
> >
> > There should also be a configuration to support no response (per Virtual
> > Channel), and if that is enabled, the user will not be required to call
> > the 'rte_dmadev_completed' API.
> > This shall also be part of the capability.
> 
> Do you mean some silent mode? The application only needs to submit
> requests to the hardware.
>
> Could you briefly describe the working principles and application
> scenarios of the corresponding device?

It is kind of a silent mode w.r.t. the command completion from QDMA.

There could be a level of synchronization in the application at a higher
level, due to which the QDMA status dequeue would not be necessary and
would be an overhead. In this mode, extra data/bytes could be passed with
the DMA which would indirectly indicate whether the DMA is complete or not.
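
As an illustration only (this is not part of the proposed API, and error
handling is omitted), one way such an implicit completion signal can work
is to enqueue a one-byte fill behind the payload copy and poll the marker,
assuming the virtual channel completes jobs in submission order:

#include <rte_dmadev.h>
#include <rte_malloc.h>
#include <rte_pause.h>

static void
silent_copy_wait(uint16_t dev_id, uint16_t vchan,
		 rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	volatile uint8_t *marker = rte_zmalloc(NULL, 1, 0);
	rte_iova_t marker_iova =
		rte_malloc_virt2iova((void *)(uintptr_t)marker);

	rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
	/* The 1-byte fill behind the copy acts as the completion marker. */
	rte_dmadev_fill(dev_id, vchan, 1, marker_iova, 1,
			RTE_DMA_OP_FLAG_SUBMIT);
	while (*marker == 0)
		rte_pause(); /* no rte_dmadev_completed() call needed */
	rte_free((void *)(uintptr_t)marker);
}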

> 
> >
> >> +	struct rte_dmadev_port_parameters src_port;
> >> +	/** 1) Used to describe the port parameter in the memory-to-device
> >> +	 * transfer scenario.
> >> +	 * 2) Used to describe the destination port parameter in the
> >> +	 * device-to-device transfer scenario.
> >> +	 * @see struct rte_dmadev_port_parameters
> >> +	 */
> >> +	struct rte_dmadev_port_parameters dst_port;
> >> +};
> >> +
> >
> > <snip>
> >
> >> +/**
> >> + * @warning
> >> + * @b EXPERIMENTAL: this API may change without prior notice.
> >> + *
> >> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> >> + *
> >> + * This queues up a scatter list copy operation to be performed by hardware,
> >> + * but does not trigger hardware to begin that operation.
> >
> > This would need updating for the submit flag.
> > The statement should be true only when the flag is set?
> > Similar comments apply to the 'rte_dmadev_copy_sg' and 'rte_dma_fill' APIs
> 
> OK, will fix in V4
> 
> >
> >> + *
> >> + * @param dev_id
> >> + *   The identifier of the device.
> >> + * @param vchan
> >> + *   The identifier of virtual DMA channel.
> >> + * @param sg
> >> + *   The pointer of scatterlist.
> >> + * @param flags
> >> + *   The flags for this operation.
> >> + *   @see RTE_DMA_OP_FLAG_*
> >> + *
> >> + * @return
> >> + *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
> >> + *   - <0: Error code returned by the driver copy scatterlist function.
> >> + */
> >> +__rte_experimental
> >> +static inline int
> >> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct
> rte_dma_sg
> >> *sg,
> >> +		   uint64_t flags)
> >> +{
> >> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> >> +#ifdef RTE_DMADEV_DEBUG
> >> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> >> +	    vchan >= dev->data->dev_conf.max_vchans ||
> >> +	    sg == NULL)
> >> +		return -EINVAL;
> >> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> >> +#endif
> >> +	return (*dev->copy_sg)(dev, vchan, sg, flags);
> >> +}
> >> +
> >
> > .
> >
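
For context, the submit-flag semantics discussed above let an application
batch several enqueues and ring the doorbell only once; a minimal sketch
using the v3 API names (error handling omitted):

#include <rte_dmadev.h>

static void
enqueue_batch(uint16_t dev_id, uint16_t vchan, const rte_iova_t *src,
	      const rte_iova_t *dst, const uint32_t *len, uint16_t n)
{
	uint16_t i;

	/* No RTE_DMA_OP_FLAG_SUBMIT: jobs are queued, HW is not kicked. */
	for (i = 0; i < n; i++)
		rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len[i], 0);
	/* One doorbell for the whole batch; passing RTE_DMA_OP_FLAG_SUBMIT
	 * on the last enqueue would be equivalent. */
	rte_dmadev_perform(dev_id, vchan);
}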

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15 11:16       ` Nipun Gupta
@ 2021-07-15 12:11         ` Bruce Richardson
  2021-07-15 12:31           ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-15 12:11 UTC (permalink / raw)
  To: Nipun Gupta
  Cc: fengchengwen, thomas, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, Hemant Agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, Gagandeep Singh

On Thu, Jul 15, 2021 at 11:16:54AM +0000, Nipun Gupta wrote:
> 
> 
> > -----Original Message-----
> > From: fengchengwen <fengchengwen@huawei.com>
> > Sent: Thursday, July 15, 2021 1:59 PM
> > To: Nipun Gupta <nipun.gupta@nxp.com>; thomas@monjalon.net;
> > ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
> > jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
> > Cc: dev@dpdk.org; mb@smartsharesystems.com; Hemant Agrawal
> > <hemant.agrawal@nxp.com>; maxime.coquelin@redhat.com;
> > honnappa.nagarahalli@arm.com; david.marchand@redhat.com;
> > sburla@marvell.com; pkapoor@marvell.com; konstantin.ananyev@intel.com;
> > Gagandeep Singh <G.Singh@nxp.com>
> > Subject: Re: [PATCH v3] dmadev: introduce DMA device library
> > 
> > On 2021/7/14 20:22, Nipun Gupta wrote:
> > > <snip>
> > >
> > >> +/**
> > >> + * A structure used to configure a virtual DMA channel.
> > >> + */
> > >> +struct rte_dmadev_vchan_conf {
> > >> +	uint8_t direction;
> > >> +	/**< Set of supported transfer directions
> > >> +	 * @see RTE_DMA_MEM_TO_MEM
> > >> +	 * @see RTE_DMA_MEM_TO_DEV
> > >> +	 * @see RTE_DMA_DEV_TO_MEM
> > >> +	 * @see RTE_DMA_DEV_TO_DEV
> > >> +	 */
> > >> +	/** Number of descriptors for the virtual DMA channel */
> > >> +	uint16_t nb_desc;
> > >> +	/** 1) Used to describe the port parameter in the device-to-memory
> > >> +	 * transfer scenario.
> > >> +	 * 2) Used to describe the source port parameter in the
> > >> +	 * device-to-device transfer scenario.
> > >> +	 * @see struct rte_dmadev_port_parameters
> > >> +	 */
> > >
> > > There should also be a configuration to support no response (per Virtual
> > > Channel), and if that is enabled, the user will not be required to call
> > > the 'rte_dmadev_completed' API.
> > > This shall also be part of the capability.
> > 
> > Do you mean some silent mode? The application only needs to submit
> > requests to the hardware.
> >
> > Could you briefly describe the working principles and application
> > scenarios of the corresponding device?
> 
> It is kind of a silent mode w.r.t. the command completion from QDMA.
>
> There could be a level of synchronization in the application at a higher
> level, due to which the QDMA status dequeue would not be necessary and
> would be an overhead. In this mode, extra data/bytes could be passed with
> the DMA which would indirectly indicate whether the DMA is complete or not.
> 
I'm wondering if such a setting could be per-device (i.e. per HW queue)
rather than per virtual channel? Something like this would be easier to
support in that way, because we could use different function pointers for
the fastpath operations depending on whether completions are to be tracked
or not. For example: only occasional descriptors will need completion
addresses specified in the "enqueue" calls, and the "submit" function would
also do any ring cleanup that would otherwise be done by "completed" call.
Having separate function calls would reduce the number of branches that
need to be evaluated in this mode, as well as simplifying the code.

/Bruce
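
A sketch of how that per-device split might look inside a driver (all
names here are illustrative, not dmadev API):

#include <stdbool.h>
#include <stdint.h>

struct drv_dev;
typedef int (*drv_copy_fn)(struct drv_dev *dev, uint64_t src, uint64_t dst,
			   uint32_t len);

struct drv_dev {
	drv_copy_fn copy; /* fastpath entry selected at configure time */
	bool silent;
};

static int
drv_copy_tracked(struct drv_dev *dev, uint64_t src, uint64_t dst,
		 uint32_t len)
{
	/* would also record a completion address for later polling */
	(void)dev; (void)src; (void)dst; (void)len;
	return 0;
}

static int
drv_copy_silent(struct drv_dev *dev, uint64_t src, uint64_t dst,
		uint32_t len)
{
	/* no completion tracking at all in this mode */
	(void)dev; (void)src; (void)dst; (void)len;
	return 0;
}

static void
drv_configure(struct drv_dev *dev, bool silent)
{
	/* pick the implementation once; no branch in the fastpath */
	dev->silent = silent;
	dev->copy = silent ? drv_copy_silent : drv_copy_tracked;
}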

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15 12:11         ` Bruce Richardson
@ 2021-07-15 12:31           ` Jerin Jacob
  2021-07-15 12:34             ` Nipun Gupta
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-15 12:31 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Nipun Gupta, fengchengwen, thomas, ferruh.yigit, jerinj,
	andrew.rybchenko, dev, mb, Hemant Agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, Gagandeep Singh

On Thu, Jul 15, 2021 at 5:41 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Thu, Jul 15, 2021 at 11:16:54AM +0000, Nipun Gupta wrote:
> >
> >
> > > -----Original Message-----
> > > From: fengchengwen <fengchengwen@huawei.com>
> > > Sent: Thursday, July 15, 2021 1:59 PM
> > > To: Nipun Gupta <nipun.gupta@nxp.com>; thomas@monjalon.net;
> > > ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
> > > jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
> > > Cc: dev@dpdk.org; mb@smartsharesystems.com; Hemant Agrawal
> > > <hemant.agrawal@nxp.com>; maxime.coquelin@redhat.com;
> > > honnappa.nagarahalli@arm.com; david.marchand@redhat.com;
> > > sburla@marvell.com; pkapoor@marvell.com; konstantin.ananyev@intel.com;
> > > Gagandeep Singh <G.Singh@nxp.com>
> > > Subject: Re: [PATCH v3] dmadev: introduce DMA device library
> > >
> > > On 2021/7/14 20:22, Nipun Gupta wrote:
> > > > <snip>
> > > >
> > > >> +/**
> > > >> + * A structure used to configure a virtual DMA channel.
> > > >> + */
> > > >> +struct rte_dmadev_vchan_conf {
> > > >> +        uint8_t direction;
> > > >> +        /**< Set of supported transfer directions
> > > >> +         * @see RTE_DMA_MEM_TO_MEM
> > > >> +         * @see RTE_DMA_MEM_TO_DEV
> > > >> +         * @see RTE_DMA_DEV_TO_MEM
> > > >> +         * @see RTE_DMA_DEV_TO_DEV
> > > >> +         */
> > > >> +        /** Number of descriptors for the virtual DMA channel */
> > > >> +        uint16_t nb_desc;
> > > >> +        /** 1) Used to describe the port parameter in the device-to-memory
> > > >> +         * transfer scenario.
> > > >> +         * 2) Used to describe the source port parameter in the
> > > >> +         * device-to-device transfer scenario.
> > > >> +         * @see struct rte_dmadev_port_parameters
> > > >> +         */
> > > >
> > > > There should also be a configuration to support no response (per Virtual
> > > > Channel), and if that is enabled, the user will not be required to call
> > > > the 'rte_dmadev_completed' API.
> > > > This shall also be part of the capability.
> > >
> > > Do you mean some silent mode? The application only needs to submit
> > > requests to the hardware.
> > >
> > > Could you briefly describe the working principles and application
> > > scenarios of the corresponding device?
> >
> > It is kind of a silent mode w.r.t. the command completion from QDMA.
> >
> > There could be a level of synchronization in the application at a higher
> > level, due to which the QDMA status dequeue would not be necessary and
> > would be an overhead. In this mode, extra data/bytes could be passed with
> > the DMA which would indirectly indicate whether the DMA is complete or not.
> >
> I'm wondering if such a setting could be per-device (i.e. per HW queue)
> rather than per virtual channel? Something like this would be easier to
> support in that way, because we could use different function pointers for
> the fastpath operations depending on whether completions are to be tracked
> or not. For example: only occasional descriptors will need completion
> addresses specified in the "enqueue" calls, and the "submit" function would
> also do any ring cleanup that would otherwise be done by "completed" call.
> Having separate function calls would reduce the number of branches that
> need to be evaluated in this mode, as well as simplifying the code.


+1 to add it in a config param, i.e. for the device.

>
> /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v3] dmadev: introduce DMA device library
  2021-07-15 12:31           ` Jerin Jacob
@ 2021-07-15 12:34             ` Nipun Gupta
  0 siblings, 0 replies; 339+ messages in thread
From: Nipun Gupta @ 2021-07-15 12:34 UTC (permalink / raw)
  To: Jerin Jacob, Bruce Richardson
  Cc: fengchengwen, thomas, ferruh.yigit, jerinj, andrew.rybchenko,
	dev, mb, Hemant Agrawal, maxime.coquelin, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev,
	Gagandeep Singh



> -----Original Message-----
> From: Jerin Jacob <jerinjacobk@gmail.com>
> Sent: Thursday, July 15, 2021 6:02 PM
> To: Bruce Richardson <bruce.richardson@intel.com>
> Cc: Nipun Gupta <nipun.gupta@nxp.com>; fengchengwen
> <fengchengwen@huawei.com>; thomas@monjalon.net; ferruh.yigit@intel.com;
> jerinj@marvell.com; andrew.rybchenko@oktetlabs.ru; dev@dpdk.org;
> mb@smartsharesystems.com; Hemant Agrawal <hemant.agrawal@nxp.com>;
> maxime.coquelin@redhat.com; honnappa.nagarahalli@arm.com;
> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com;
> konstantin.ananyev@intel.com; Gagandeep Singh <G.Singh@nxp.com>
> Subject: Re: [PATCH v3] dmadev: introduce DMA device library
> 
> On Thu, Jul 15, 2021 at 5:41 PM Bruce Richardson
> <bruce.richardson@intel.com> wrote:
> >
> > On Thu, Jul 15, 2021 at 11:16:54AM +0000, Nipun Gupta wrote:
> > >
> > >
> > > > -----Original Message-----
> > > > From: fengchengwen <fengchengwen@huawei.com>
> > > > Sent: Thursday, July 15, 2021 1:59 PM
> > > > To: Nipun Gupta <nipun.gupta@nxp.com>; thomas@monjalon.net;
> > > > ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
> > > > jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
> > > > Cc: dev@dpdk.org; mb@smartsharesystems.com; Hemant Agrawal
> > > > <hemant.agrawal@nxp.com>; maxime.coquelin@redhat.com;
> > > > honnappa.nagarahalli@arm.com; david.marchand@redhat.com;
> > > > sburla@marvell.com; pkapoor@marvell.com;
> konstantin.ananyev@intel.com;
> > > > Gagandeep Singh <G.Singh@nxp.com>
> > > > Subject: Re: [PATCH v3] dmadev: introduce DMA device library
> > > >
> > > > On 2021/7/14 20:22, Nipun Gupta wrote:
> > > > > <snip>
> > > > >
> > > > >> +/**
> > > > >> + * A structure used to configure a virtual DMA channel.
> > > > >> + */
> > > > >> +struct rte_dmadev_vchan_conf {
> > > > >> +        uint8_t direction;
> > > > >> +        /**< Set of supported transfer directions
> > > > >> +         * @see RTE_DMA_MEM_TO_MEM
> > > > >> +         * @see RTE_DMA_MEM_TO_DEV
> > > > >> +         * @see RTE_DMA_DEV_TO_MEM
> > > > >> +         * @see RTE_DMA_DEV_TO_DEV
> > > > >> +         */
> > > > >> +        /** Number of descriptors for the virtual DMA channel */
> > > > >> +        uint16_t nb_desc;
> > > > >> +        /** 1) Used to describe the port parameter in the device-to-memory
> > > > >> +         * transfer scenario.
> > > > >> +         * 2) Used to describe the source port parameter in the
> > > > >> +         * device-to-device transfer scenario.
> > > > >> +         * @see struct rte_dmadev_port_parameters
> > > > >> +         */
> > > > >
> > > > > There should also be a configuration to support no response (per Virtual
> > > > > Channel), and if that is enabled, the user will not be required to call
> > > > > the 'rte_dmadev_completed' API.
> > > > > This shall also be part of the capability.
> > > >
> > > > Do you mean some silent mode? The application only needs to submit
> > > > requests to the hardware.
> > > >
> > > > Could you briefly describe the working principles and application
> > > > scenarios of the corresponding device?
> > >
> > > It is kind of a silent mode w.r.t. the command completion from QDMA.
> > >
> > > There could be a level of synchronization in the application at a higher
> > > level, due to which the QDMA status dequeue would not be necessary and
> > > would be an overhead. In this mode, extra data/bytes could be passed with
> > > the DMA which would indirectly indicate whether the DMA is complete or not.
> > >
> > I'm wondering if such a setting could be per-device (i.e. per HW queue)
> > rather than per virtual channel? Something like this would be easier to
> > support in that way, because we could use different function pointers for
> > the fastpath operations depending on whether completions are to be tracked
> > or not. For example: only occasional descriptors will need completion
> > addresses specified in the "enqueue" calls, and the "submit" function would
> > also do any ring cleanup that would otherwise be done by "completed" call.
> > Having separate function calls would reduce the number of branches that
> > need to be evaluated in this mode, as well as simplifying the code.

Agree, adding a config option for the device makes sense.

> 
> 
> +1 to add in config param ie. for the device.
> 
> >
> > /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (6 preceding siblings ...)
  2021-07-13 12:27 ` [dpdk-dev] [PATCH v3] " Chengwen Feng
@ 2021-07-15 15:41 ` Chengwen Feng
  2021-07-15 16:04   ` fengchengwen
  2021-07-16  2:45 ` [dpdk-dev] [PATCH v5] " Chengwen Feng
                   ` (21 subsequent siblings)
  29 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-07-15 15:41 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces 'dmadevice' which is a generic type of DMA
device.

The APIs of the dmadev library expose some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete field 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPTED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all structs are
  prefixed with rte_dmadev.
* put the comments afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrange the rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
---
 MAINTAINERS                  |    4 +
 config/rte_config.h          |    3 +
 lib/dmadev/meson.build       |    7 +
 lib/dmadev/rte_dmadev.c      |  539 ++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 1009 ++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  180 ++++++++
 lib/dmadev/rte_dmadev_pmd.h  |   72 +++
 lib/dmadev/version.map       |   37 ++
 lib/meson.build              |    1 +
 9 files changed, 1852 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..e6ecb49
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,539 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER(rte_dmadev_logtype, lib.dmadev, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
+			RTE_ASSERT(rte_dmadevices[i].state ==
+				   RTE_DMADEV_UNUSED);
+			return i;
+		}
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	RTE_ASSERT(dev->data->dev_id == i);
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_bak;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_bak = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_bak;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	uint8_t dir_all = RTE_DMA_DIR_MEM_TO_MEM | RTE_DMA_DIR_MEM_TO_DEV |
+			  RTE_DMA_DIR_DEV_TO_MEM | RTE_DMA_DIR_DEV_TO_DEV;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	dev = &rte_dmadevices[dev_id];
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == 0 ||
+	    conf->direction & ~dir_all) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_release, -ENOTSUP);
+	return (*dev->dev_ops->vchan_release)(dev, vchan);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..5e6f85c
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1009 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+ * channel represents a different transfer context. A DMA operation request
+ * must be submitted to a virtual DMA channel.
+ * E.g. an application could create virtual DMA channel 0 for the mem-to-mem
+ *      transfer scenario, and virtual DMA channel 1 for the mem-to-dev
+ *      transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and could
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure()), it must invoke rte_dmadev_stop() first to stop the
+ * device and then do the reconfiguration before invoking rte_dmadev_start()
+ * again. The dataplane APIs should not be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ *   a) The first part is the submission of operation requests:
+ *        - rte_dmadev_copy()
+ *        - rte_dmadev_copy_sg()
+ *        - rte_dmadev_fill()
+ *        - rte_dmadev_perform()
+ *      These APIs could work with different virtual DMA channels which have
+ *      different contexts.
+ *      The first three APIs are used to submit an operation request to the
+ *      virtual DMA channel; if the submission is successful, a uint16_t
+ *      ring_idx is returned, otherwise a negative number is returned.
+ *      The last API is used to issue a doorbell to hardware; the flags
+ *      parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs
+ *      can do the same work.
+ *   b) The second part is to obtain the result of requests:
+ *        - rte_dmadev_completed()
+ *            - return the number of operation requests completed successfully.
+ *        - rte_dmadev_completed_status()
+ *            - return the status of completed operation requests.
+ *
+ * About the ring_idx which rte_dmadev_copy/copy_sg/fill() returned,
+ * the rules are as follows:
+ *   a) ring_idx for each virtual DMA channel are independent.
+ *   b) For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *      when it reaches UINT16_MAX, it wraps back to zero.
+ *   c) This ring_idx can be used by applications to track per-operation
+ *      metadata in an application-defined circular ring.
+ *   d) The initial ring_idx of a virtual DMA channel is zero; after the
+ *      device is stopped, the ring_idx needs to be reset to zero.
+ *   Example:
+ *      step-1: start one dmadev
+ *      step-2: enqueue a copy operation, the ring_idx returned is 0
+ *      step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *      ...
+ *      step-101: stop the dmadev
+ *      step-102: start the dmadev
+ *      step-103: enqueue a copy operation, the ring_idx returned is 0
+ *      ...
+ *      step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *      step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *      ...
+ *
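+ * A short sketch of the tracking technique described in c) above
+ * (illustrative only; META_RING_SZ, 'meta' and 'cookie' are
+ * application-defined assumptions, not part of this API):
+ *
+ * @code
+ *     #define META_RING_SZ 1024             // power of two
+ *     uint64_t meta[META_RING_SZ];          // per-operation app data
+ *
+ *     int idx = rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
+ *     if (idx >= 0)
+ *             meta[idx & (META_RING_SZ - 1)] = cookie;
+ * @endcode
+ *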
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free and assume they are not invoked in parallel on different
+ * logical cores to work on the same target object.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the device index refers to a valid, attached DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - true if the device index is valid, false otherwise.
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities */
+#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using VAs as DMA addresses.
+ * If the device supports SVA, the application can pass any VA, e.g. memory
+ * from rte_malloc(), rte_memzone(), malloc() or the stack.
+ * If the device does not support SVA, the application must pass IOVA
+ * addresses, e.g. obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
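+
+/* Illustrative use of the SVA capability above (a sketch, not part of this
+ * patch): choose the DMA address for a buffer allocated with rte_malloc();
+ * 'buf' and 'dev_id' are assumptions.
+ *
+ *     struct rte_dmadev_info info;
+ *     rte_dmadev_info_get(dev_id, &info);
+ *     rte_iova_t addr = (info.dev_capa & RTE_DMA_DEV_CAPA_SVA) ?
+ *             (rte_iova_t)(uintptr_t)buf : rte_malloc_virt2iova(buf);
+ */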
+
+#define RTE_DMA_DEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMA_DEV_CAPA_FENCE		(1ull << 6)
+/**< DMA device supports fence.
+ * If the device supports fence, the application can set a fence flag when
+ * enqueuing an operation via rte_dmadev_copy/copy_sg/fill().
+ * If an operation has a fence flag, it must be processed only after all
+ * previous operations are completed.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * The ops capabilities start at bit index 32, leaving a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	uint8_t silent_mode;
+	/**< Indicates whether to work in silent mode.
+	 * 0-default mode, 1-silent mode.
+	 *
+	 * @see RTE_DMA_DEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/* DMA transfer direction defines. */
+#define RTE_DMA_DIR_MEM_TO_MEM	(1ull << 0)
+/**< DMA transfer direction - from memory to memory.
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+
+#define RTE_DMA_DIR_MEM_TO_DEV	(1ull << 1)
+/**< DMA transfer direction - from memory to device.
+ * In a typical scenario, an ARM SoC is installed in an x86 server as an iNIC
+ * through the PCIe interface. In this case, the ARM SoC works in EP
+ * (endpoint) mode; it can initiate a DMA move request from memory (i.e. ARM
+ * memory) to device (i.e. x86 host memory).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+
+#define RTE_DMA_DIR_DEV_TO_MEM	(1ull << 2)
+/**< DMA transfer direction - from device to memory.
+ * In a typical scenario, an ARM SoC is installed in an x86 server as an iNIC
+ * through the PCIe interface. In this case, the ARM SoC works in EP
+ * (endpoint) mode; it can initiate a DMA move request from device (i.e. x86
+ * host memory) to memory (i.e. ARM memory).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+
+#define RTE_DMA_DIR_DEV_TO_DEV	(1ull << 3)
+/**< DMA transfer direction - from device to device.
+ * In a typical scenario, an ARM SoC is installed in an x86 server as an iNIC
+ * through the PCIe interface. In this case, the ARM SoC works in EP
+ * (endpoint) mode; it can initiate a DMA move request from device (i.e. one
+ * region of x86 host memory) to device (i.e. another region of x86 host
+ * memory).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_PCIE = 1,
+};
+
+/**
+ * A structure used to describe DMA port parameters.
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type; /**< The device port type. */
+	union {
+		/** For PCIE port:
+		 *
+		 * The following diagram shows how the SoC's PCIE module
+		 * connects to multiple PCIE hosts and multiple endpoints.
+		 * The PCIE module has an integrated DMA controller.
+		 * If the DMA wants to access the memory of host A, the access
+		 * can be initiated by PF1 in core0, or by VF0 of PF0 in
+		 * core0.
+		 *
+		 * System Bus
+		 *    |     ----------PCIE module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIE Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIE Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIE Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * The following structure is used to describe the above access
+		 * port.
+		 * @note If some fields are not supported by hardware, set
+		 *       them to zero. There are also no capabilities defined
+		 *       for this; it is the duty of the application to set
+		 *       the correct parameters.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIE core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	uint8_t direction;
+	/**< Set of supported transfer directions
+	 * @see RTE_DMA_DIR_MEM_TO_MEM
+	 * @see RTE_DMA_DIR_MEM_TO_DEV
+	 * @see RTE_DMA_DIR_DEV_TO_MEM
+	 * @see RTE_DMA_DIR_DEV_TO_DEV
+	 */
+
+	/** Number of descriptors for the virtual DMA channel. */
+	uint16_t nb_desc;
+	/** 1) Used to describe the port parameter in the device-to-memory
+	 * transfer scenario.
+	 * 2) Used to describe the source port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param src_port;
+	/** 1) Used to describe the port parameter in the memory-to-device
+	 * transfer scenario.
+	 * 2) Used to describe the destination port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+};
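+
+/* Illustrative vchan configuration for a mem-to-dev transfer through a
+ * PCIE port (a sketch, not part of this patch; the core/PF ids are
+ * assumptions):
+ *
+ *     struct rte_dmadev_vchan_conf conf = {
+ *             .direction = RTE_DMA_DIR_MEM_TO_DEV,
+ *             .nb_desc = 128,
+ *             .dst_port = {
+ *                     .port_type = RTE_DMADEV_PORT_PCIE,
+ *                     .pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *             },
+ *     };
+ */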
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation successful; the value is the virtual DMA channel id.
+ *          This value is less than the 'max_vchans' field of struct
+ *          rte_dmadev_conf configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Release a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel, as returned by
+ *   rte_dmadev_vchan_setup().
+ *
+ * @return
+ *   - =0: Successfully release the virtual DMA channel.
+ *   - <0: Error code returned by the driver virtual channel release function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which completed successfully. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
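+
+/* Illustrative stats query across all vchans (a sketch, not part of this
+ * patch; assumes <inttypes.h> and <stdio.h> are included):
+ *
+ *     struct rte_dmadev_stats stats;
+ *     if (rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats) == 0)
+ *             printf("submitted %" PRIu64 " completed %" PRIu64
+ *                    " failed %" PRIu64 "\n", stats.submitted_count,
+ *                    stats.completed_count, stats.completed_fail_count);
+ */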
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/* DMA transfer result status code defines */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL = 0,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user can modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort the
+	 * job); this allows outstanding requests to complete as much as
+	 * possible, reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed, though, so report the status of the not-attempted jobs
+	 * before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read
+	 * error.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the mem-to-dev/dev-to-mem/
+	 * dev-to-dev transfer scenario.
+	 */
+	RTE_DMA_STATUS_UNKNOWN,
+	/**< The operation failed to complete due to an unknown reason. */
+};
+
+/**
+ * rte_dmadev_sge - holds one entry of a scatter-gather DMA operation request.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr;
+	uint32_t length;
+};
+
+/**
+ * rte_dmadev_sg - holds a scatter-gather DMA operation request.
+ */
+struct rte_dmadev_sg {
+	struct rte_dmadev_sge *src;
+	struct rte_dmadev_sge *dst;
+	uint16_t nb_src; /**< The number of src entries. */
+	uint16_t nb_dst; /**< The number of dst entries. */
+};
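+
+/* Illustrative scatter-gather request setup (a sketch, not part of this
+ * patch; the IOVAs and lengths are assumptions):
+ *
+ *     struct rte_dmadev_sge src[2] = { {src_iova0, 64}, {src_iova1, 64} };
+ *     struct rte_dmadev_sge dst[1] = { {dst_iova, 128} };
+ *     struct rte_dmadev_sg sg = { src, dst, 2, 1 };
+ *
+ *     rte_dmadev_copy_sg(dev_id, vchan, &sg, RTE_DMA_OP_FLAG_SUBMIT);
+ */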
+
+#include "rte_dmadev_core.h"
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must issue doorbell to hardware after
+ * enqueued jobs.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write data to the low-level cache.
+ * This is just a hint; there is no capability bit for it, and the driver
+ * should not return an error if this flag is set.
+ */
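+
+/* Illustrative batching using the submit semantics above (a sketch, not
+ * part of this patch; 'srcs', 'dsts', 'lens' and 'nb_ops' are
+ * assumptions): enqueue a burst without RTE_DMA_OP_FLAG_SUBMIT, then ring
+ * the doorbell once.
+ *
+ *     for (i = 0; i < nb_ops; i++)
+ *             rte_dmadev_copy(dev_id, vchan, srcs[i], dsts[i], lens[i], 0);
+ *     rte_dmadev_submit(dev_id, vchan);
+ */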
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the
+ * 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the hardware is
+ * triggered to begin the operation; otherwise the hardware is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * hardware is triggered to begin the operation; otherwise it is not
+ * triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param sg
+ *   Pointer to the scatter-gather list.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan,
+		   const struct rte_dmadev_sg *sg,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    sg == NULL || sg->src == NULL || sg->dst == NULL ||
+	    sg->nb_src == 0 || sg->nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+	return (*dev->copy_sg)(dev, vchan, sg, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the
+ * 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the hardware is
+ * triggered to begin the operation; otherwise the hardware is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by
+ * rte_dmadev_copy/copy_sg/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, -ENOTSUP);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; each
+ * operation's result may be a success or a failure.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of the status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   Array to hold the status code of each completed operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed (successfully or not).
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, -ENOTSUP);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
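+
+/* Illustrative completion-handling pattern combining the two APIs above
+ * (a sketch, not part of this patch; BURST and the resource-release step
+ * are assumptions): drain successful completions first, then query
+ * per-operation status once an error is signalled.
+ *
+ *     uint16_t n, last;
+ *     bool err;
+ *     enum rte_dma_status_code st[BURST];
+ *
+ *     n = rte_dmadev_completed(dev_id, vchan, BURST, &last, &err);
+ *     // ... release per-op resources for the n successful ops ...
+ *     if (err)
+ *             n = rte_dmadev_completed_status(dev_id, vchan, BURST,
+ *                                             &last, st);
+ */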
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..8b2da9c
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types, that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/** @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/** @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/** @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/** @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/** @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/** @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_vchan_release_t)(struct rte_dmadev *dev,
+					  uint16_t vchan);
+/** @internal Used to release a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/** @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/** @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/** @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/** @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/** @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sg *sg,
+				    uint64_t flags);
+/** @internal Used to enqueue a scatter list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/** @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/** @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/** @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/** @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_vchan_release_t vchan_release;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in 'struct rte_dmadev'
+	 * from the primary process; it is used by secondary processes to get
+	 * the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * In the primary process, after a dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), PCI/SoC device probing should
+	 * initialize this field and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data' pointed to by the 'data' field.
+	 * In a secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of
+	 * 'struct rte_dmadev_data' initialized by the primary process.
+	 * It is the primary process's responsibility to deinitialize this
+	 * field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
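+
+/* Illustrative probe-time usage (a sketch, not part of this patch;
+ * 'priv_size' and 'my_dmadev_ops' are driver-specific assumptions):
+ *
+ *     struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ *     if (dev == NULL)
+ *             return -ENOMEM;
+ *     if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ *             dev->dev_private = rte_zmalloc(name, priv_size, 0);
+ *             dev->data->dev_private = dev->dev_private;
+ *     }
+ *     dev->dev_ops = &my_dmadev_ops;
+ */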
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..2af78e4
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,37 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_fails;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_release;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
	global:
+
+	rte_dmadevices;
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
+
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-15 15:41 ` [dpdk-dev] [PATCH v4] " Chengwen Feng
@ 2021-07-15 16:04   ` fengchengwen
  2021-07-15 16:33     ` Bruce Richardson
  2021-07-16 12:54     ` Jerin Jacob
  0 siblings, 2 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-15 16:04 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

@bruce, @jerin  Some unmodified review comments are returned here:

1.
COMMENT: > +			memset(dmadev_shared_data->data, 0,
> +			       sizeof(dmadev_shared_data->data));
I believe all memzones are zero on allocation anyway, so this memset is
unecessary and can be dropped.

REPLY: I didn't find documentation confirming the memzone is cleared on
allocation, so the memset is kept.

2.
COMMENT: > + * @see struct rte_dmadev_info::dev_capa
> + */
Drop this flag as unnecessary. All devices either always provide ordering
guarantee - in which case it's a no-op - or else support the flag.

REPLY: I prefer to define it; it lets the user know whether fence is supported.

3.
COMMENT: I would suggest v4 to split the patch like (so that we can review and
ack each patch)
1) Only public header file with Doxygen inclusion, (There is a lot of
Doxygen syntax issue in the patch)
2) 1 or more patches for implementation.

REPLY: the v4 is still one patch, including the doxygen files. For now it's
better to keep the header file and implementation in one patch.
Later I will push a doxygen file patch and a skeleton file patch.

4.
COMMENT: > +        * @see RTE_DMA_DEV_TO_MEM
> +        * @see RTE_DMA_DEV_TO_DEV
Since we can set of only one direction per vchan . Should be we make
it as enum to
make it clear.

REPLY: Some devices may support it. I think it's OK for future use.

5.
COMMENT: > +__rte_experimental
> +int
> +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
I would like to remove this to align with other device class in DPDK and use
configure and start again if there change in vchannel setup/

REPLY: I think we could have the ability to dynamically reconfigure a vchan
without stopping the device.

6.
COMMENT: > +
> +#define RTE_DMADEV_ALL_VCHAN   0xFFFFu
RTE_DMADEV_VCHAN_ALL ??

REPLY: I don't like reserving fixed-length queue stats as in 'struct rte_eth_stats',
which may run into the RTE_ETHDEV_QUEUE_STAT_CNTRS too-short problem.
So here I define the API so it can get stats of one or ALL vchans.

7.
COMMENT: > +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
> +                  uint64_t flags)
In order to avoid population of rte_dma_sg in stack (as it is
fastpath), I would like
to change the API as
rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dma_sge
*src,  struct rte_dma_sge *dst,   uint16_t nb_src, uint16_t nb_dst,
uint64_t flags)

REPLY: Too many (7) parameters if they are separated. I prefer to define a struct to wrap them.

8.
COMMENT: change RTE_LOG_REGISTER(rte_dmadev_logtype, lib.dmadev, INFO); to
RTE_LOG_REGISTER_DEFAULT, and change rte_dmadev_logtype to logtype.

REPLY: Our CI version still doesn't support RTE_LOG_REGISTER_DEFAULT (it has not
synced to the newest version); I think this can be fixed in the next version or patch.
Also, because RTE_LOG_REGISTER defines rte_dmadev_logtype as a non-static variable,
changing it to 'logtype' may cause a namespace conflict, so I think it's OK to retain
the original variable name.

thanks.

[snip]

On 2021/7/15 23:41, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
> v4:
> * replace xxx_complete_fails with xxx_completed_status.
> * add SILENT capability, also a silent_mode in rte_dmadev_conf.
> * add op_flag_llc for performance.
> * rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
> * delete field 'enqueued_count' from rte_dmadev_stats.
> * make rte_dmadev hold 'dev_private' field.
> * add RTE_DMA_STATUS_NOT_ATTEMPTED status code.
> * rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
> * rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all structs are
>   prefixed with rte_dmadev.
> * put the comment afterwards.
> * fix some doxygen problems.
> * delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
>   RTE_DMADEV_PTR_OR_ERR_RET.
> * replace strlcpy with rte_strscpy.
> * other minor modifications from review comment.
> v3:

[snip]

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-15 16:04   ` fengchengwen
@ 2021-07-15 16:33     ` Bruce Richardson
  2021-07-16  3:04       ` fengchengwen
  2021-07-16 12:54     ` Jerin Jacob
  1 sibling, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-15 16:33 UTC (permalink / raw)
  To: fengchengwen
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Fri, Jul 16, 2021 at 12:04:33AM +0800, fengchengwen wrote:
> @bruce, @jerin  Some unmodified review comments are returned here:
> 
> 1.
> COMMENT: > +			memset(dmadev_shared_data->data, 0,
> > +			       sizeof(dmadev_shared_data->data));
> I believe all memzones are zero on allocation anyway, so this memset is
> unecessary and can be dropped.
> 
> REPLY: I didn't find documentation confirming the memzone is cleared on
> allocation, so the memset is kept.

Ok. No big deal either way.

> 2.  COMMENT: > + * @see struct rte_dmadev_info::dev_capa
> > + */
> Drop this flag as unnecessary. All devices either always provide ordering
> guarantee - in which case it's a no-op - or else support the flag.
> 
> REPLY: I prefer to define it; it lets the user know whether fence is supported.
> 
I don't see it that way. The flag is pointless because the application
can't use it to make any decisions. If two operations require ordering the
application must use the fence flag because if the device doesn't guarantee
ordering it's necessary, and if the device does guarantee ordering it's
better and easier to just specify the flag than to put in code branches.
Having this as a capability is just going to confuse the user - better to
just say that if you need ordering, put in a fence.

> 3.
> COMMENT: I would suggest v4 to split the patch like (so that we can review and
> ack each patch)
> 1) Only public header file with Doxygen inclusion, (There is a lot of
> Doxygen syntax issue in the patch)
> 2) 1 or more patches for implementation.
> 
> REPLY: the v4 is still one patch, including the doxygen files. For now it's
> better to keep the header file and implementation in one patch.
> Later I will push a doxygen file patch and a skeleton file patch.
> 

It's fine for reviews and testing for now.

> 4.
> COMMENT: > +        * @see RTE_DMA_DEV_TO_MEM
> > +        * @see RTE_DMA_DEV_TO_DEV
> Since we can set of only one direction per vchan . Should be we make
> it as enum to
> make it clear.
> 
> REPLY: Some devices may support it. I think it's OK for future use.
> 
+1
That may need a capability flag though, to indicate if a device supports
multi-direction in a single vchannel.

> 5.
> COMMENT: > +__rte_experimental
> > +int
> > +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
> I would like to remove this to align with other device class in DPDK and use
> configure and start again if there change in vchannel setup/
> 
> REPLY: I think we could have the ability to dynamically reconfigure a vchan
> without stopping the device.
> 
> 6.
> COMMENT: > +
> > +#define RTE_DMADEV_ALL_VCHAN   0xFFFFu
> RTE_DMADEV_VCHAN_ALL ??
> 
> REPLY: I don't like reserving fixed-length queue stats as in 'struct rte_eth_stats',
> which may run into the RTE_ETHDEV_QUEUE_STAT_CNTRS too-short problem.
> So here I define the API so it can get stats of one or ALL vchans.
> 
> 7.
> COMMENT: > +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
> > +                  uint64_t flags)
> In order to avoid population of rte_dma_sg in stack (as it is
> fastpath), I would like
> to change the API as
> rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dma_sge
> *src,  struct rte_dma_sge *dst,   uint16_t nb_src, uint16_t nb_dst,
> uint64_t flags)
> 
> REPLY: Too many (7) parameters if they are separated. I prefer to define a struct to wrap them.
> 
> 8.
> COMMENT: change RTE_LOG_REGISTER(rte_dmadev_logtype, lib.dmadev, INFO); to
> RTE_LOG_REGISTER_DEFAULT, and change rte_dmadev_logtype to logtype.
> 
> REPLY: Our CI version still doesn't support RTE_LOG_REGISTER_DEFAULT (it has not
> synced to the newest version); I think this can be fixed in the next version or patch.
> Also, because RTE_LOG_REGISTER defines rte_dmadev_logtype as a non-static variable,
> changing it to 'logtype' may cause a namespace conflict, so I think it's OK to retain
> the original variable name.
> 
Ok on the variable naming. I think before merge, the macro will need to be
updated to REGISTER_DEFAULT, but it's something very minor.

> thanks.
> 
> [snip]

Thanks,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v5] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (7 preceding siblings ...)
  2021-07-15 15:41 ` [dpdk-dev] [PATCH v4] " Chengwen Feng
@ 2021-07-16  2:45 ` Chengwen Feng
  2021-07-16 13:20   ` Jerin Jacob
  2021-07-16 14:41   ` Bruce Richardson
  2021-07-19  3:29 ` [dpdk-dev] [PATCH v6] " Chengwen Feng
                   ` (20 subsequent siblings)
  29 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-16  2:45 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduce 'dmadevice' which is a generic type of DMA
device.

The APIs of dmadev library exposes some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
v5:
* add doxy-api-* file modify.
* use RTE_LOG_REGISTER_DEFAULT.
* fix typo.
* resolve some incorrect comments.
* fix some doxygen problems.
* fix version.map still holding rte_dmadev_completed_fails.
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete field 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPTED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all structs are
  prefixed with rte_dmadev.
* put the comment afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrange the rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
---
 MAINTAINERS                  |    4 +
 config/rte_config.h          |    3 +
 doc/api/doxy-api-index.md    |    1 +
 doc/api/doxy-api.conf.in     |    1 +
 lib/dmadev/meson.build       |    7 +
 lib/dmadev/rte_dmadev.c      |  539 ++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 1028 ++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  182 ++++++++
 lib/dmadev/rte_dmadev_pmd.h  |   72 +++
 lib/dmadev/version.map       |   37 ++
 lib/meson.build              |    1 +
 11 files changed, 1875 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..109ec1f 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..926629a
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,539 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
+			RTE_ASSERT(rte_dmadevices[i].state ==
+				   RTE_DMADEV_UNUSED);
+			return i;
+		}
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate dmadev shared data memory. */
+			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	RTE_ASSERT(dev->data->dev_id == i);
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_bak;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_bak = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_bak;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	uint8_t dir_all = RTE_DMA_DIR_MEM_TO_MEM | RTE_DMA_DIR_MEM_TO_DEV |
+			  RTE_DMA_DIR_DEV_TO_MEM | RTE_DMA_DIR_DEV_TO_DEV;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	dev = &rte_dmadevices[dev_id];
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == 0 ||
+	    conf->direction & ~dir_all) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction & RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_release, -ENOTSUP);
+	return (*dev->dev_ops->vchan_release)(dev, vchan);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..e74e531
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1028 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * A DMA controller may have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * A dmadev can create multiple virtual DMA channels, where each virtual DMA
+ * channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. E.g. an application could
+ * create virtual DMA channel 0 for memory-to-memory transfers and virtual
+ * DMA channel 1 for memory-to-device transfers.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure()), it must invoke rte_dmadev_stop() first to stop the
+ * device and then do the reconfiguration before invoking rte_dmadev_start()
+ * again. The dataplane APIs should not be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
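+ * A minimal usage sketch (illustrative only; error checks are omitted, and
+ * it assumes device 0 is a copy-capable dmadev while src_iova, dst_iova and
+ * length are prepared by the application):
+ *
+ * @code{.c}
+ *     uint16_t dev_id = 0;
+ *     struct rte_dmadev_conf dev_conf = { .max_vchans = 1 };
+ *     struct rte_dmadev_vchan_conf vchan_conf = {
+ *         .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *         .nb_desc = 128,
+ *     };
+ *     int vchan;
+ *
+ *     rte_dmadev_configure(dev_id, &dev_conf);
+ *     vchan = rte_dmadev_vchan_setup(dev_id, &vchan_conf);
+ *     rte_dmadev_start(dev_id);
+ *
+ *     rte_dmadev_copy(dev_id, vchan, src_iova, dst_iova, length,
+ *                     RTE_DMA_OP_FLAG_SUBMIT);
+ *     while (rte_dmadev_completed(dev_id, vchan, 1, NULL, NULL) == 0)
+ *         ;
+ *
+ *     rte_dmadev_stop(dev_id);
+ *     rte_dmadev_close(dev_id);
+ * @endcode
+ *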
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively,
+ * the flags parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT)
+ * can be used to achieve the same effect.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * The ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()) obeys the following rules:
+ *     - The ring_idx of each virtual DMA channel is independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions, which are assumed not to be invoked in parallel from
+ * different logical cores on the same target object.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the given dev_id refers to a valid, attached DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities */
+#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows a VA to be used as the DMA address.
+ * If the device supports SVA, the application can pass any VA, e.g. memory
+ * from rte_malloc(), rte_memzone(), malloc() or the stack.
+ * If the device doesn't support SVA, the application must pass an IOVA, e.g.
+ * one obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMA_DEV_CAPA_FENCE		(1ull << 6)
+/**< DMA device supports fencing.
+ * If the device supports fencing, the application can set a fence flag when
+ * enqueueing an operation via rte_dmadev_copy/copy_sg/fill().
+ * If an operation carries a fence flag, the operation must be processed only
+ * after all previous operations have completed.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * The ops capabilities start at bit index 32 to leave a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
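+
+/* A short usage sketch (illustrative only; assumes 'dev_id' is valid): check
+ * that a device actually supports the required transfer direction and ops,
+ * e.g. memory-to-memory copies, before using it:
+ *
+ *     struct rte_dmadev_info info;
+ *
+ *     if (rte_dmadev_info_get(dev_id, &info) != 0)
+ *         return -1;
+ *     if (!(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM) ||
+ *         !(info.dev_capa & RTE_DMA_DEV_CAPA_OPS_COPY))
+ *         return -1;
+ */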
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	uint8_t silent_mode;
+	/**< Indicates whether to work in silent mode.
+	 * 0-default mode, 1-silent mode.
+	 *
+	 * @see RTE_DMA_DEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/* DMA transfer direction defines. */
+#define RTE_DMA_DIR_MEM_TO_MEM	(1ull << 0)
+/**< DMA transfer direction - from memory to memory.
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+
+#define RTE_DMA_DIR_MEM_TO_DEV	(1ull << 1)
+/**< DMA transfer direction - from memory to device.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC
+ * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
+ * mode and can initiate a DMA move request from memory (the ARM memory) to a
+ * device (the x86 host memory).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+
+#define RTE_DMA_DIR_DEV_TO_MEM	(1ull << 2)
+/**< DMA transfer direction - from device to memory.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC
+ * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
+ * mode and can initiate a DMA move request from a device (the x86 host
+ * memory) to memory (the ARM memory).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+
+#define RTE_DMA_DIR_DEV_TO_DEV	(1ull << 3)
+/**< DMA transfer direction - from device to device.
+ * In a typical scenario, an ARM SoC is installed on an x86 server as an iNIC
+ * through the PCIE interface. In this case, the ARM SoC works in EP (endpoint)
+ * mode and can initiate a DMA move request from a device (one x86 host memory
+ * region) to a device (another x86 host memory region).
+ *
+ * @see struct rte_dmadev_vchan_conf::direction
+ */
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_PCIE = 1,
+};
+
+/**
+ * A structure used to describe DMA port parameters.
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type; /**< The device port type. */
+	union {
+		/** For a PCIE port:
+		 *
+		 * The following model shows how a SoC's PCIE module connects
+		 * to multiple PCIE hosts and multiple endpoints. The PCIE
+		 * module has an integrated DMA controller.
+		 * If the DMA controller wants to access the memory of host A,
+		 * the access can be initiated by PF1 in core0, or by VF0 of
+		 * PF0 in core0.
+		 *
+		 * System Bus
+		 *    |     ----------PCIE module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIE Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIE Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIE Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * The following structure is used to describe the above access
+		 * port.
+		 *
+		 * @note If some fields are not supported by the hardware, set
+		 * them to zero. There are also no capability bits defined for
+		 * these fields; it is the application's duty to set the
+		 * correct parameters.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIE core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	uint8_t direction;
+	/**< Set of supported transfer directions
+	 * @see RTE_DMA_DIR_MEM_TO_MEM
+	 * @see RTE_DMA_DIR_MEM_TO_DEV
+	 * @see RTE_DMA_DIR_DEV_TO_MEM
+	 * @see RTE_DMA_DIR_DEV_TO_DEV
+	 */
+
+	/** Number of descriptors for the virtual DMA channel. */
+	uint16_t nb_desc;
+	/** 1) Used to describe the port parameter in the device-to-memory
+	 * transfer scenario.
+	 * 2) Used to describe the source port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param src_port;
+	/** 1) Used to describe the port parameter in the memory-to-device
+	 * transfer scenario.
+	 * 2) Used to describe the destination port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+};
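+
+/* Example (illustrative only): a device-to-memory virtual DMA channel whose
+ * source device sits behind PCIE core 0, PF 1; the field values are made up
+ * and hardware specific:
+ *
+ *     struct rte_dmadev_vchan_conf conf = {
+ *         .direction = RTE_DMA_DIR_DEV_TO_MEM,
+ *         .nb_desc = 128,
+ *         .src_port = {
+ *             .port_type = RTE_DMADEV_PORT_PCIE,
+ *             .pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *         },
+ *     };
+ *     int vchan = rte_dmadev_vchan_setup(dev_id, &conf);
+ */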
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation succeeded; the value is the virtual DMA channel id.
+ *          This value is always less than the 'max_vchans' field of struct
+ *          rte_dmadev_conf configured via rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Release a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel, as returned by vchan setup.
+ *
+ * @return
+ *   - =0: Successfully release the virtual DMA channel.
+ *   - <0: Error code returned by the driver virtual channel release function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which successfully completed. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL = 0,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user may modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * the job), which allows outstanding requests to complete as much as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation was not attempted, e.g. because the jobs in a
+	 * particular batch appeared after a fence where a previous job
+	 * failed. In some HW implementations it is possible for jobs from
+	 * later batches to complete anyway, so the status of the
+	 * not-attempted jobs should be reported before the status of those
+	 * newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISON,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read
+	 * error.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device, device-to-memory
+	 * or device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - a scatter-gather entry of a DMA operation request.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr;
+	uint32_t length;
+};
+
+/**
+ * rte_dmadev_sg - a scatter-gather DMA operation request.
+ */
+struct rte_dmadev_sg {
+	struct rte_dmadev_sge *src;
+	struct rte_dmadev_sge *dst;
+	uint16_t nb_src; /**< The number of src entries. */
+	uint16_t nb_dst; /**< The number of dst entries. */
+};
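+
+/* Example (illustrative only): gather two source fragments into a single
+ * destination buffer; the addresses are assumed to be prepared by the
+ * application:
+ *
+ *     struct rte_dmadev_sge src[2] = {
+ *         { .addr = src_iova0, .length = 64 },
+ *         { .addr = src_iova1, .length = 64 },
+ *     };
+ *     struct rte_dmadev_sge dst[1] = {
+ *         { .addr = dst_iova, .length = 128 },
+ *     };
+ *     struct rte_dmadev_sg sg = {
+ *         .src = src, .dst = dst, .nb_src = 2, .nb_dst = 1,
+ *     };
+ *
+ *     rte_dmadev_copy_sg(dev_id, vchan, &sg, RTE_DMA_OP_FLAG_SUBMIT);
+ */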
+
+#include "rte_dmadev_core.h"
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * The operation with this flag also issues a doorbell to the hardware after
+ * the job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write data to the low-level cache.
+ * This is just a hint; there is no capability bit for it, and a driver
+ * must not return an error if this flag is set.
+ */
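+
+/* Example (illustrative only): enqueue two dependent copies; the second one
+ * carries a fence so it is processed only after the first completes, and it
+ * also rings the doorbell:
+ *
+ *     rte_dmadev_copy(dev_id, vchan, src, mid, len, 0);
+ *     rte_dmadev_copy(dev_id, vchan, mid, dst, len,
+ *                     RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);
+ */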
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware, if the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger hardware to begin
+ * this operation, otherwise do not trigger hardware.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware, if
+ * the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger hardware
+ * to begin this operation, otherwise do not trigger hardware.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param sg
+ *   A pointer to the scatter-gather request.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan,
+		   const struct rte_dmadev_sg *sg,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    sg == NULL || sg->src == NULL || sg->dst == NULL ||
+	    sg->nb_src == 0 || sg->nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, sg, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware, if the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger hardware to begin
+ * this operation, otherwise do not trigger hardware.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; the result of
+ * each operation may be success or failure.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of the status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   An array to hold the status codes of the completed operations.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
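+
+/* Example (illustrative only): a completion-handling loop which fetches
+ * per-operation status codes only when an error is flagged on the fast path;
+ * handle_ok() and handle_status() are application-defined:
+ *
+ *     uint16_t n, last_idx;
+ *     bool has_error = false;
+ *
+ *     n = rte_dmadev_completed(dev_id, vchan, 32, &last_idx, &has_error);
+ *     handle_ok(n, last_idx);
+ *     if (has_error) {
+ *         enum rte_dma_status_code status[32];
+ *
+ *         n = rte_dmadev_completed_status(dev_id, vchan, 32, &last_idx,
+ *                                         status);
+ *         handle_status(n, last_idx, status);
+ *     }
+ */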
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..04f6310
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/** @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/** @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/** @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/** @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/** @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/** @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_vchan_release_t)(struct rte_dmadev *dev,
+					  uint16_t vchan);
+/** @internal Used to release a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/** @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/** @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/** @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/** @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/** @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sg *sg,
+				    uint64_t flags);
+/** @internal Used to enqueue a scatter list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/** @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/** @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/** @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/** @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_vchan_release_t vchan_release;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
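+
+/* Example (illustrative only): a PMD typically defines a static ops table
+ * and points 'dev_ops' at it during probing; the my_dma_* callbacks below
+ * are hypothetical:
+ *
+ *     static const struct rte_dmadev_ops my_dma_ops = {
+ *         .dev_info_get = my_dma_info_get,
+ *         .dev_configure = my_dma_configure,
+ *         .dev_start = my_dma_start,
+ *         .dev_stop = my_dma_stop,
+ *         .dev_close = my_dma_close,
+ *         .vchan_setup = my_dma_vchan_setup,
+ *     };
+ */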
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * of the primary process; it is used by secondary processes to obtain
+	 * the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing code should
+	 * initialize this field and copy its value to the 'dev_private' field
+	 * of 'struct rte_dmadev_data', which is pointed to by the 'data'
+	 * field.
+	 * In a secondary process, the dmadev framework initializes this field
+	 * by copying it from the 'dev_private' field of
+	 * 'struct rte_dmadev_data', which was initialized by the primary
+	 * process.
+	 * It is the primary process's responsibility to deinitialize this
+	 * field after invoking rte_dmadev_pmd_release() in the PCI/SoC device
+	 * removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
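+/* A typical probe-time flow (illustrative only; 'my_dma_ops' and 'priv' are
+ * hypothetical driver objects):
+ *
+ *     dev = rte_dmadev_pmd_allocate(name);
+ *     if (dev == NULL)
+ *         return -ENOMEM;
+ *     dev->dev_ops = &my_dma_ops;
+ *     dev->dev_private = priv;
+ *     dev->data->dev_private = dev->dev_private;
+ */
+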
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..64f030f
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,37 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_release;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dmadevices;
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-15 16:33     ` Bruce Richardson
@ 2021-07-16  3:04       ` fengchengwen
  2021-07-16  9:50         ` Bruce Richardson
  2021-07-16 12:40         ` Jerin Jacob
  0 siblings, 2 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-16  3:04 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On 2021/7/16 0:33, Bruce Richardson wrote:
> On Fri, Jul 16, 2021 at 12:04:33AM +0800, fengchengwen wrote:
>> @bruce, jerin  Some unmodified review comments are returned here:
>>

[snip]

> 
>> 2.  COMMENT: > + * @see struct rte_dmadev_info::dev_capa
>>> + */
>> Drop this flag as unnecessary. All devices either always provide ordering
>> guarantee - in which case it's a no-op - or else support the flag.
>>
>> REPLY: I prefer to define it; it could let the user know whether fence is supported.
>>
> I don't see it that way. The flag is pointless because the application
> can't use it to make any decisions. If two operations require ordering the
> application must use the fence flag because if the device doesn't guarantee
> ordering it's necessary, and if the device does guarantee ordering it's
> better and easier to just specify the flag than to put in code branches.
> Having this as a capability is just going to confuse the user - better to
> just say that if you need ordering, put in a fence.
> 

If the driver doesn't support fence and the application sets the fence flag,
what is the driver's behavior? Return an error, or implement the fence at the
driver layer?

If the fence capability is exposed to the application, then the application
could decide which option to use, e.g. commit the operations before the fence
and make sure they complete, or use another collaborative approach.

I think in this manner the driver implementation can be simplified.
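
For example (just a sketch of what I mean, using only the APIs in this patch):

    if (info.dev_capa & RTE_DMA_DEV_CAPA_FENCE) {
        rte_dmadev_copy(dev_id, vchan, src, dst, len,
                        RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);
    } else {
        /* commit and wait for completion before the dependent job */
        rte_dmadev_copy(dev_id, vchan, src, dst, len,
                        RTE_DMA_OP_FLAG_SUBMIT);
        while (rte_dmadev_completed(dev_id, vchan, 1, NULL, NULL) == 0)
            ;
    }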

[snip]

>> 4.
>> COMMENT: > +        * @see RTE_DMA_DEV_TO_MEM
>>> +        * @see RTE_DMA_DEV_TO_DEV
>> Since we can set of only one direction per vchan . Should be we make
>> it as enum to
>> make it clear.
>>
>> REPLY: May some devices support it. I think it's OK for future use.
>>
> +1
> That may need a capability flag though, to indicate if a device supports
> multi-direction in a single vchannel.

There are a lot of combinations, and I tend not to add a capability for
multi-direction. Currently, no device supports multiple directions, so can we
delay that definition?

[snip]

thanks

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-16  3:04       ` fengchengwen
@ 2021-07-16  9:50         ` Bruce Richardson
  2021-07-16 12:34           ` Jerin Jacob
  2021-07-16 12:40         ` Jerin Jacob
  1 sibling, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-16  9:50 UTC (permalink / raw)
  To: fengchengwen
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Fri, Jul 16, 2021 at 11:04:30AM +0800, fengchengwen wrote:
> On 2021/7/16 0:33, Bruce Richardson wrote:
> > On Fri, Jul 16, 2021 at 12:04:33AM +0800, fengchengwen wrote:
> >> @bruce, jerin  Some unmodified review comments are returned here:
> >>
> 
> [snip]
> 
> > 
> >> 2.  COMMENT: > + * @see struct rte_dmadev_info::dev_capa
> >>> + */
> >> Drop this flag as unnecessary. All devices either always provide ordering
> >> guarantee - in which case it's a no-op - or else support the flag.
> >>
> >> REPLY: I prefer to define it; it could let the user know whether fence is supported.
> >>
> > I don't see it that way. The flag is pointless because the application
> > can't use it to make any decisions. If two operations require ordering the
> > application must use the fence flag because if the device doesn't guarantee
> > ordering it's necessary, and if the device does guarantee ordering it's
> > better and easier to just specify the flag than to put in code branches.
> > Having this as a capability is just going to confuse the user - better to
> > just say that if you need ordering, put in a fence.
> > 
> 
> > If the driver doesn't support fence and the application sets the fence
> > flag, what is the driver's behavior? Return an error, or implement the
> > fence at the driver layer?
> 
The simple matter is that all devices must support ordering. Therefore all
devices must support the fence flag. If a device does all jobs in-order
anyway, then fence flag can be ignored, while if jobs are done in parallel
or out-of-order, then fence flag must be respected. A driver should never
return error just because a fence flag is set.
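
To make that concrete: the application-side pattern is always just the
following sketch (flag names from this series; buffers, lengths and the vchan
are illustrative, and return-value checks are omitted):

	/* copy B must not start until copy A has completed */
	rte_dmadev_copy(dev_id, vchan, src_a, dst_a, len_a, 0);
	rte_dmadev_copy(dev_id, vchan, src_b, dst_b, len_b,
			RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);

On an in-order device the fence is a no-op; on an out-of-order device it is
enforced. Either way the application code is identical - no branching.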

> If expose the fence capability to application, then application could decide
> which option to use. e.g. commit the operations before the fence and make sure it completes,
> or use another collaborative approach.
> 
> I think in this manner, the driver implementation can be simplified.
> 
What you are describing is not a fence capability, but a limitation in the
hardware's ability to enforce ordering. Even if we don't have a fence, there
is no guarantee that one burst of jobs committed will complete before the next
is started, as some HW can do batches in parallel in some circumstances.

As far as I know, all currently proposed HW using this API either works
in-order or supports fencing, so this capability flag is unnecessary. I
suggest we omit it until it is needed - at which point we can re-open the
discussion based on a concrete usecase.

If you *really* want to have the flag, I suggest:
* have one flag meaning "fencing does not order", i.e. the fence flag is
  ignored and the device does not work in-order
* make it clear in the documentation that even if fencing doesn't order,
  it's still not an error to put in a fence flag - it will just be ignored.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-16  9:50         ` Bruce Richardson
@ 2021-07-16 12:34           ` Jerin Jacob
  0 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-16 12:34 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Fri, Jul 16, 2021 at 3:20 PM Bruce Richardson
<bruce.richardson@intel.com> wrote:
>
> On Fri, Jul 16, 2021 at 11:04:30AM +0800, fengchengwen wrote:
> > On 2021/7/16 0:33, Bruce Richardson wrote:
> > > On Fri, Jul 16, 2021 at 12:04:33AM +0800, fengchengwen wrote:
> > >> @burce, jerin  Some unmodified review comments are returned here:
> > >>
> >
> > [snip]
> >
> > >
> > >> 2.  COMMENT: > + * @see struct rte_dmadev_info::dev_capa
> > >>> + */
> > >> Drop this flag as unnecessary. All devices either always provide ordering
> > >> guarantee - in which case it's a no-op - or else support the flag.
> > >>
> > >> REPLY: I prefer define it, it could let user know whether support fence.
> > >>
> > > I don't see it that way. The flag is pointless because the application
> > > can't use it to make any decisions. If two operations require ordering the
> > > application must use the fence flag because if the device doesn't guarantee
> > > ordering it's necessary, and if the device does guarantee ordering it's
> > > better and easier to just specify the flag than to put in code branches.
> > > Having this as a capability is just going to confuse the user - better to
> > > just say that if you need ordering, put in a fence.
> > >
> >
> > If driver don't support fence, and application set the fence flag, What's
> > driving behavior like? return error or implement fence at driver layer ?
> >
> The simple matter is that all devices must support ordering. Therefore all
> devices must support the fence flag. If a device does all jobs in-order
> anyway, then fence flag can be ignored, while if jobs are done in parallel
> or out-of-order, then fence flag must be respected. A driver should never
> return error just because a fence flag is set.
>
> > If expose the fence capability to application, then application could decide
> > which option to use. e.g. commit the operations before the fence and make sure it completes,
> > or use another collaborative approach.
> >
> > I think in this manner, the driver implementation can be simplified.
> >
> What you are describing is not a fence capability, but a limitation of the
> hardware to enforce ordering. Even if we don't have fence, there is no
> guarantee that one burst of jobs committed will complete before the next is
> started as some HW can do batches in parallel in some circumstances.
>
> As far as I know, all currently proposed HW using this API either works
> in-order or supports fencing, so this capability flag is unnecessary. I
> suggest we omit it until it is needed - at which point we can re-open the
> discussion based on a concrete usecase.
>
> If you *really* want to have the flag, I suggest:
> * have one for fencing doesn't order, i.e. fence flag is ignored and the
>   device does not work in-order
> * make it clear in the documentation that even if fencing doesn't order
>   it's still not an error to put in a fence flag - it will just be ignored.

Since we have only two classes of devices:
1) a fence is implicit between transfers by default
2) a fence needs to be explicitly added
and there is no device that does not support the fence, IMO, as Bruce
suggested, we don't need a fence capability flag.
In class (1) devices, for fastpath functions, RTE_DMA_OP_FLAG_FENCE will be a
NOP.



>
> /Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-16  3:04       ` fengchengwen
  2021-07-16  9:50         ` Bruce Richardson
@ 2021-07-16 12:40         ` Jerin Jacob
  2021-07-16 12:48           ` Bruce Richardson
  1 sibling, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-16 12:40 UTC (permalink / raw)
  To: fengchengwen
  Cc: Bruce Richardson, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Fri, Jul 16, 2021 at 8:34 AM fengchengwen <fengchengwen@huawei.com> wrote:
>
> On 2021/7/16 0:33, Bruce Richardson wrote:
> > On Fri, Jul 16, 2021 at 12:04:33AM +0800, fengchengwen wrote:
> >> @burce, jerin  Some unmodified review comments are returned here:
> >>
>
> [snip]
>
> >
> >> 2.  COMMENT: > + * @see struct rte_dmadev_info::dev_capa
> >>> + */
> >> Drop this flag as unnecessary. All devices either always provide ordering
> >> guarantee - in which case it's a no-op - or else support the flag.
> >>
> >> REPLY: I prefer define it, it could let user know whether support fence.
> >>
> > I don't see it that way. The flag is pointless because the application
> > can't use it to make any decisions. If two operations require ordering the
> > application must use the fence flag because if the device doesn't guarantee
> > ordering it's necessary, and if the device does guarantee ordering it's
> > better and easier to just specify the flag than to put in code branches.
> > Having this as a capability is just going to confuse the user - better to
> > just say that if you need ordering, put in a fence.
> >
>
> If driver don't support fence, and application set the fence flag, What's
> driving behavior like? return error or implement fence at driver layer ?
>
> If expose the fence capability to application, then application could decide
> which option to use. e.g. commit the operations before the fence and make sure it completes,
> or use another collaborative approach.
>
> I think in this manner, the driver implementation can be simplified.
>
> [snip]
>
> >> 4.
> >> COMMENT: > +        * @see RTE_DMA_DEV_TO_MEM
> >>> +        * @see RTE_DMA_DEV_TO_DEV
> >> Since we can set of only one direction per vchan . Should be we make
> >> it as enum to
> >> make it clear.
> >>
> >> REPLY: May some devices support it. I think it's OK for future use.
> >>
> > +1
> > That may need a capability flag though, to indicate if a device supports
> > multi-direction in a single vchannel.
>
> There are a lot of combinations, and I tend not to add a capability for multi-direction.
> Currently, no device supports multiple directions, So can we delay that definition?

Yes. IMO, we need to change the comment from "Set of supported transfer
directions" to "Transfer direction".

If a channel supported multiple directions, then in the fastpath we would need
another flag to select which direction to use. Since none of the devices
supports this mode, I think we should change the comment to "Transfer
direction".

>
> [snip]
>
> thanks

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-16 12:40         ` Jerin Jacob
@ 2021-07-16 12:48           ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-16 12:48 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: fengchengwen, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Fri, Jul 16, 2021 at 06:10:48PM +0530, Jerin Jacob wrote:
> On Fri, Jul 16, 2021 at 8:34 AM fengchengwen <fengchengwen@huawei.com> wrote:
> >
> > On 2021/7/16 0:33, Bruce Richardson wrote:
> > > On Fri, Jul 16, 2021 at 12:04:33AM +0800, fengchengwen wrote:
> > >> @burce, jerin  Some unmodified review comments are returned here:
> > >>
> >
> > [snip]
> >
> > >
> > >> 2.  COMMENT: > + * @see struct rte_dmadev_info::dev_capa
> > >>> + */
> > >> Drop this flag as unnecessary. All devices either always provide ordering
> > >> guarantee - in which case it's a no-op - or else support the flag.
> > >>
> > >> REPLY: I prefer define it, it could let user know whether support fence.
> > >>
> > > I don't see it that way. The flag is pointless because the application
> > > can't use it to make any decisions. If two operations require ordering the
> > > application must use the fence flag because if the device doesn't guarantee
> > > ordering it's necessary, and if the device does guarantee ordering it's
> > > better and easier to just specify the flag than to put in code branches.
> > > Having this as a capability is just going to confuse the user - better to
> > > just say that if you need ordering, put in a fence.
> > >
> >
> > If driver don't support fence, and application set the fence flag, What's
> > driving behavior like? return error or implement fence at driver layer ?
> >
> > If expose the fence capability to application, then application could decide
> > which option to use. e.g. commit the operations before the fence and make sure it completes,
> > or use another collaborative approach.
> >
> > I think in this manner, the driver implementation can be simplified.
> >
> > [snip]
> >
> > >> 4.
> > >> COMMENT: > +        * @see RTE_DMA_DEV_TO_MEM
> > >>> +        * @see RTE_DMA_DEV_TO_DEV
> > >> Since we can set of only one direction per vchan . Should be we make
> > >> it as enum to
> > >> make it clear.
> > >>
> > >> REPLY: May some devices support it. I think it's OK for future use.
> > >>
> > > +1
> > > That may need a capability flag though, to indicate if a device supports
> > > multi-direction in a single vchannel.
> >
> > There are a lot of combinations, and I tend not to add a capability for multi-direction.
> > Currently, no device supports multiple directions, So can we delay that definition?
> 
> Yes. IMO, we need to change the comment  from "Set of supported
> transfer directions" to
> "Transfer direction"
> 
> If channel supports multiple directions, then in fastpath we need
> another flag to select
> which direction to use.
> 
> IMO, Since none of the devices supports this mode, I think, we should
> change the comment
> to "Transfer direction"
> 
Ok for me.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v4] dmadev: introduce DMA device library
  2021-07-15 16:04   ` fengchengwen
  2021-07-15 16:33     ` Bruce Richardson
@ 2021-07-16 12:54     ` Jerin Jacob
  1 sibling, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-16 12:54 UTC (permalink / raw)
  To: fengchengwen
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 15, 2021 at 9:34 PM fengchengwen <fengchengwen@huawei.com> wrote:
>
> @burce, jerin  Some unmodified review comments are returned here:
>
> 1.
> COMMENT: > +                    memset(dmadev_shared_data->data, 0,
> > +                            sizeof(dmadev_shared_data->data));
> I believe all memzones are zero on allocation anyway, so this memset is
> unecessary and can be dropped.
>
> REPLY: I couldn't find documentation making it clear that this function
> zeroes the memzone.
>
> 2.
> COMMENT: > + * @see struct rte_dmadev_info::dev_capa
> > + */
> Drop this flag as unnecessary. All devices either always provide ordering
> guarantee - in which case it's a no-op - or else support the flag.
>
> REPLY: I prefer define it, it could let user know whether support fence.
>
> 3.
> COMMENT: I would suggest v4 to split the patch like (so that we can review and
> ack each patch)
> 1) Only public header file with Doxygen inclusion, (There is a lot of
> Doxygen syntax issue in the patch)
> 2) 1 or more patches for implementation.
>
> REPLY: the v4 is still a single patch, with the doxygen files included. For
> now it's better to keep the header file and implementation in one patch.
> Later I will push a doxygen file patch and a skeleton driver patch.

Makes sense. Whichever version you would like to get acked, please split it
in that version.

>
> 4.
> COMMENT: > +        * @see RTE_DMA_DEV_TO_MEM
> > +        * @see RTE_DMA_DEV_TO_DEV
> Since we can set of only one direction per vchan . Should be we make
> it as enum to
> make it clear.
>
> REPLY: May some devices support it. I think it's OK for future use.

Sent comment in another thread.

>
> 5.
> COMMENT: > +__rte_experimental
> > +int
> > +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);
> I would like to remove this to align with other device class in DPDK and use
> configure and start again if there change in vchannel setup/
>
> REPLY: I think we could support reconfiguring vchans dynamically, without
> having to stop the device.

I think, in general, drivers do a lot of work in _start(); keeping that model
gives the driver enough flexibility (like getting a complete view of HW
resources at start()). IMO, there is no need to deviate from the other classes
of DPDK devices here. Also, release is a slow-path operation, so adding more
calls for reconfiguring is OK, as in other DPDK device classes.
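
Concretely, reconfiguration would reuse the standard flow, as in this sketch
(calls from this series; error checks omitted):

	rte_dmadev_stop(dev_id);
	rte_dmadev_configure(dev_id, &new_conf);     /* re-plan vchans here */
	rte_dmadev_vchan_setup(dev_id, &vchan_conf); /* set up vchans again */
	rte_dmadev_start(dev_id); /* driver gets a complete view of HW here */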



>
> 6.
> COMMENT: > +
> > +#define RTE_DMADEV_ALL_VCHAN   0xFFFFu
> RTE_DMADEV_VCHAN_ALL ??
>
> REPLY: I don't like reserving a fixed-length per-queue stats array as in
> 'struct rte_eth_stats', which can run into the same problem of
> RTE_ETHDEV_QUEUE_STAT_CNTRS being too short.
> So here I define the API so it can get stats for one vchan or for ALL vchans.

I meant the subsystem name should come as the third item, i.e.
RTE_DMADEV_VCHAN_ALL rather than RTE_DMADEV_ALL_VCHAN, where ALL is not a
subsystem: RTE_DMADEV_<subsystem in DMA>_.._ACTION/VERB/etc.




>
> 7.
> COMMENT: > +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, const struct rte_dma_sg *sg,
> > +                  uint64_t flags)
> In order to avoid population of rte_dma_sg in stack (as it is
> fastpath), I would like
> to change the API as
> rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dma_sge
> *src,  struct rte_dma_sge *dst,   uint16_t nb_src, uint16_t nb_dst,
> uint64_t flags)
>
> REPLY: Too many (7) parameters if they are separated. I prefer defining a
> struct to wrap them.

I agree if it is a slow-path function. However, I strongly prefer not to have
one more stack indirection in the fastpath, and we will not be adding new
arguments to this function in the future. So it is better as:

rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
                   struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
                   uint64_t flags)
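
For example, a two-segment gather into one destination segment would then be
called as below (a sketch; I am assuming addr/length field names for
struct rte_dma_sge, and the IOVAs/lengths are illustrative):

	struct rte_dma_sge src[2] = {
		{ .addr = src_iova0, .length = len0 },
		{ .addr = src_iova1, .length = len1 },
	};
	struct rte_dma_sge dst[1] = {
		{ .addr = dst_iova, .length = len0 + len1 },
	};

	ret = rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1,
				 RTE_DMA_OP_FLAG_SUBMIT);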



>
> 8.
> COMMENT: change RTE_LOG_REGISTER(rte_dmadev_logtype, lib.dmadev, INFO); to
> RTE_LOG_REGISTER_DEFAULT, and change rte_dmadev_logtype to logtype.
>
> REPLY: Our CI version still doesn't support RTE_LOG_REGISTER_DEFAULT (it has
> not synced to the newest version); I think this could be fixed in the next
> version or a follow-up patch.
> Also, because RTE_LOG_REGISTER defines rte_dmadev_logtype as a non-static
> variable, renaming it to logtype could cause a namespace conflict, so I think
> it's OK to retain the original variable name.
>
> thanks.
>
> [snip]
>
> On 2021/7/15 23:41, Chengwen Feng wrote:
> > This patch introduce 'dmadevice' which is a generic type of DMA
> > device.
> >
> > The APIs of dmadev library exposes some generic operations which can
> > enable configuration and I/O with the DMA devices.
> >
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > ---
> > v4:
> > * replace xxx_complete_fails with xxx_completed_status.
> > * add SILENT capability, also a silent_mode in rte_dmadev_conf.
> > * add op_flag_llc for performance.
> > * rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
> > * delete filed 'enqueued_count' from rte_dmadev_stats.
> > * make rte_dmadev hold 'dev_private' filed.
> > * add RTE_DMA_STATUS_NOT_ATTEMPED status code.
> > * rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
> > * rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all struct
> >   prefix with rte_dmadev.
> > * put the comment afterwards.
> > * fix some doxgen problem.
> > * delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
> >   RTE_DMADEV_PTR_OR_ERR_RET.
> > * replace strlcpy with rte_strscpy.
> > * other minor modifications from review comment.
> > v3:
>
> [snip]

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v5] dmadev: introduce DMA device library
  2021-07-16  2:45 ` [dpdk-dev] [PATCH v5] " Chengwen Feng
@ 2021-07-16 13:20   ` Jerin Jacob
  2021-07-16 14:41   ` Bruce Richardson
  1 sibling, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-16 13:20 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Fri, Jul 16, 2021 at 8:19 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
>
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
> v5:
> * add doxy-api-* file modify.
> * use RTE_LOG_REGISTER_DEFAULT.
> * fix typo.
> * resolve some incorrect comments.
> * fix some doxgen problem.
> * fix version.map still hold rte_dmadev_completed_fails.
> v4:
> * replace xxx_complete_fails with xxx_completed_status.
> * add SILENT capability, also a silent_mode in rte_dmadev_conf.
> * add op_flag_llc for performance.
> * rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
> * delete filed 'enqueued_count' from rte_dmadev_stats.
> * make rte_dmadev hold 'dev_private' filed.
> * add RTE_DMA_STATUS_NOT_ATTEMPED status code.
> * rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
> * rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all struct
>   prefix with rte_dmadev.
> * put the comment afterwards.
> * fix some doxgen problem.
> * delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
>   RTE_DMADEV_PTR_OR_ERR_RET.
> * replace strlcpy with rte_strscpy.
> * other minor modifications from review comment.
> v3:
> * rm reset and fill_sg ops.
> * rm MT-safe capabilities.
> * add submit flag.
> * redefine rte_dma_sg to implement asymmetric copy.
> * delete some reserved field for future use.
> * rearrangement rte_dmadev/rte_dmadev_data struct.
> * refresh rte_dmadev.h copyright.
> * update vchan setup parameter.
> * modified some inappropriate descriptions.
> * arrange version.map alphabetically.
> * other minor modifications from review comment.
> ---
>  MAINTAINERS                  |    4 +
>  config/rte_config.h          |    3 +
>  doc/api/doxy-api-index.md    |    1 +
>  doc/api/doxy-api.conf.in     |    1 +
>  lib/dmadev/meson.build       |    7 +
>  lib/dmadev/rte_dmadev.c      |  539 ++++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h      | 1028 ++++++++++++++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h |  182 ++++++++
>  lib/dmadev/rte_dmadev_pmd.h  |   72 +++
>  lib/dmadev/version.map       |   37 ++
>  lib/meson.build              |    1 +
>  11 files changed, 1875 insertions(+)
>  create mode 100644 lib/dmadev/meson.build
>  create mode 100644 lib/dmadev/rte_dmadev.c
>  create mode 100644 lib/dmadev/rte_dmadev.h
>  create mode 100644 lib/dmadev/rte_dmadev_core.h
>  create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>  create mode 100644 lib/dmadev/version.map
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index af2a91d..e01a07f 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
>  F: app/test/test_rawdev.c
>  F: doc/guides/prog_guide/rawdev.rst
>
> +DMA device API - EXPERIMENTAL
> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/
> +
> @@ -0,0 +1,1028 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + * Copyright(c) 2021 Marvell International Ltd.
> + * Copyright(c) 2021 SmartShare Systems.
> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The DMA framework is built on the following model:
> + *
> + *     ---------------   ---------------       ---------------
> + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> + *     | channel     |   | channel     |       | channel     |
> + *     ---------------   ---------------       ---------------
> + *            |                |                      |
> + *            ------------------                      |
> + *                     |                              |
> + *               ------------                    ------------
> + *               |  dmadev  |                    |  dmadev  |
> + *               ------------                    ------------
> + *                     |                              |
> + *            ------------------               ------------------
> + *            | HW-DMA-channel |               | HW-DMA-channel |
> + *            ------------------               ------------------
> + *                     |                              |
> + *                     --------------------------------
> + *                                     |
> + *                           ---------------------
> + *                           | HW-DMA-Controller |
> + *                           ---------------------
> + *
> + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
> + * each HW-DMA-channel should be represented by a dmadev.
> + *
> + * The dmadev could create multiple virtual DMA channel, each virtual DMA

channels?


> + * channel represents a different transfer context. The DMA operation request
> + * must be submitted to the virtual DMA channel. e.g. Application could create
> + * virtual DMA channel 0 for memory-to-memory transfer scenario, and create
> + * virtual DMA channel 1 for memory-to-device transfer scenario.
> + *
> + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
> + * PCI/SoC device probing phase performed at EAL initialization time. And could
> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> + * phase.
> + *
> + * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
> + * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> + *
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_vchan_setup()
> + *     - rte_dmadev_start()
> + *
> + * Then, the application can invoke dataplane APIs to process jobs.
> + *
> + * If the application wants to change the configuration (i.e. invoke
> + * rte_dmadev_configure()), it must invoke rte_dmadev_stop() first to stop the
> + * device and then do the reconfiguration before invoking rte_dmadev_start()
> + * again. The dataplane APIs should not be invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the
> + * rte_dmadev_close() function.
> + *
> + * The dataplane APIs include two parts:
> + * The first part is the submission of operation requests:
> + *     - rte_dmadev_copy()
> + *     - rte_dmadev_copy_sg()
> + *     - rte_dmadev_fill()
> + *     - rte_dmadev_submit()
> + *
> + * These APIs could work with different virtual DMA channels which have
> + * different contexts.
> + *
> + * The first three APIs are used to submit the operation request to the virtual
> + * DMA channel, if the submission is successful, a uint16_t ring_idx is
> + * returned, otherwise a negative number is returned.
> + *
> + * The last API was use to issue doorbell to hardware, and also there are flags

use->used?

> + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the
> + * same work.
> + *
> + * The second part is to obtain the result of requests:
> + *     - rte_dmadev_completed()
> + *         - return the number of operation requests completed successfully.
> + *     - rte_dmadev_completed_status()
> + *         - return the number of operation requests completed.
> + *
> + * About the ring_idx which enqueue APIs (e.g. rte_dmadev_copy()
> + * rte_dmadev_fill()) returned, the rules are as follows:
> + *     - ring_idx for each virtual DMA channel are independent.
> + *     - For a virtual DMA channel, the ring_idx is monotonically incremented,
> + *       when it reach UINT16_MAX, it wraps back to zero.
> + *     - This ring_idx can be used by applications to track per-operation
> + *       metadata in an application-defined circular ring.
> + *     - The initial ring_idx of a virtual DMA channel is zero, after the
> + *       device is stopped, the ring_idx needs to be reset to zero.
> + *
> + * One example:
> + *     - step-1: start one dmadev
> + *     - step-2: enqueue a copy operation, the ring_idx return is 0
> + *     - step-3: enqueue a copy operation again, the ring_idx return is 1
> + *     - ...
> + *     - step-101: stop the dmadev
> + *     - step-102: start the dmadev
> + *     - step-103: enqueue a copy operation, the cookie return is 0
> + *     - ...
> + *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
> + *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
> + *     - ...
> + *
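
One note on the ring_idx rules above: they map directly onto application-side
tracking, e.g. this sketch (app_meta is an application-defined array of 65536
entries, and job_ctx is illustrative):

	int ret = rte_dmadev_copy(dev_id, vchan, src, dst, len,
				  RTE_DMA_OP_FLAG_SUBMIT);
	if (ret >= 0)
		app_meta[(uint16_t)ret] = job_ctx; /* ring_idx wraps at 65535 */

	uint16_t nb, last_idx;
	bool has_error;
	nb = rte_dmadev_completed(dev_id, vchan, 32, &last_idx, &has_error);
	/* jobs last_idx - nb + 1 .. last_idx (mod 65536) are now complete */
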
> + * By default, all the functions of the dmadev API exported by a PMD are
> + * lock-free functions which assume to not be invoked in parallel on different
> + * logical cores to work on the same target object.
Should we name the target objects, e.g. dev_id, vchan_id, etc.? Up to you.


IMO, we should also describe "silent" mode here and its implications for
rte_dmadev_completed_*, since "silent" changes the above scheme on the
completion side.


> +/**
> + * A structure used to retrieve the information of an DMA device.

a DMA?

> + * A structure used to descript DMA port parameters.
> + */
> +struct rte_dmadev_port_param {
> +       enum rte_dmadev_port_type port_type; /**< The device port type. */
> +       union {
> +               /** For PCIE port:
> +                *
> +                * The following model show SoC's PCIE module connects to

shows?

> +                * multiple PCIE hosts and multiple endpoints. The PCIE module
> +                * has an integrate DMA controller.
> +                * If the DMA wants to access the memory of host A, it can be
> +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> +                *
> +                * System Bus
> +                *    |     ----------PCIE module----------
> +                *    |     Bus
> +                *    |     Interface
> +                *    |     -----        ------------------
> +                *    |     |   |        | PCIE Core0     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
> +                *    |     |   |--------|        |- VF-1 |--------| Root    |
> +                *    |     |   |        |   PF-1         |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |   |        | PCIE Core1     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
> +                *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
> +                *    |     |   |        |        |- VF-1 |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |DMA|        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |--------| PCIE Core2     |        ------
> +                *    |     |   |        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |        |                |        ------
> +                *    |     -----        ------------------
> +                *
> +                * The following structure is used to describe the above access
> +                * port.
> +                *
> +                * @note If some fields are not supported by hardware, set
> +                * these fields to zero. And also there are no capabilities
> +                * defined for this, it is the duty of the application to set
> +                * the correct parameters.

Without getting capabilities, the application cannot know which fields to set
to zero. I would suggest rewording to something like:

@note If some fields cannot be supported by the hardware/driver, then the
driver ignores those fields. Please check the driver-specific documentation
for limitations and capabilities.



> +                */
> +               struct {
> +                       uint64_t coreid : 4; /**< PCIE core id used. */
> +                       uint64_t pfid : 8; /**< PF id used. */
> +                       uint64_t vfen : 1; /**< VF enable bit. */
> +                       uint64_t vfid : 16; /**< VF id used. */
> +                       uint64_t pasid : 20;
> +                       /**< The pasid filed in TLP packet. */
> +                       uint64_t attr : 3;
> +                       /**< The attributes filed in TLP packet. */
> +                       uint64_t ph : 2;
> +                       /**< The processing hint filed in TLP packet. */
> +                       uint64_t st : 16;
> +                       /**< The steering tag filed in TLP packet. */
> +               } pcie;
> +       };
> +       uint64_t reserved[2]; /**< Reserved for future fields. */
> +};
> +
> +/**
> + * A structure used to configure a virtual DMA channel.
> + */
> +struct rte_dmadev_vchan_conf {
> +       uint8_t direction;
> +       /**< Set of supported transfer directions

Transfer direction

> +        * @see RTE_DMA_DIR_MEM_TO_MEM
> +        * @see RTE_DMA_DIR_MEM_TO_DEV
> +        * @see RTE_DMA_DIR_DEV_TO_MEM
> +        * @see RTE_DMA_DIR_DEV_TO_DEV
> +        */
> +
> +       /** Number of descriptor for the virtual DMA channel */
> +       uint16_t nb_desc;
> +       /** 1) Used to describes the port parameter in the device-to-memory
> +        * transfer scenario.
> +        * 2) Used to describes the source port parameter in the
> +        * device-to-device transfer scenario.
> +        * @see struct rte_dmadev_port_param
> +        */
> +       struct rte_dmadev_port_param src_port;
> +       /** 1) Used to describes the port parameter in the memory-to-device
> +        * transfer scenario.
> +        * 2) Used to describes the destination port parameter in the
> +        * device-to-device transfer scenario.
> +        * @see struct rte_dmadev_port_param
> +        */
> +       struct rte_dmadev_port_param dst_port;
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate and set up a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param conf
> + *   The virtual DMA channel configuration structure encapsulated into
> + *   rte_dmadev_vchan_conf object.
> + *
> + * @return
> + *   - >=0: Allocate success, it is the virtual DMA channel id. This value must
> + *          be less than the field 'max_vchans' of struct rte_dmadev_conf
> + *          which configured by rte_dmadev_configure().
> + *   - <0: Error code returned by the driver virtual channel setup function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +                      const struct rte_dmadev_vchan_conf *conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Release a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel which return by vchan setup.
> + *
> + * @return
> + *   - =0: Successfully release the virtual DMA channel.
> + *   - <0: Error code returned by the driver virtual channel release function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_release(uint16_t dev_id, uint16_t vchan);

Commented on another thread.

> +
> +/**
> + * rte_dmadev_stats - running statistics.
> + */
> +struct rte_dmadev_stats {
> +       uint64_t submitted_count;
> +       /**< Count of operations which were submitted to hardware. */
> +       uint64_t completed_fail_count;
> +       /**< Count of operations which failed to complete. */
> +       uint64_t completed_count;
> +       /**< Count of operations which successfully complete. */
> +};
> +
> +#define RTE_DMADEV_ALL_VCHAN   0xFFFFu

Commented on another thread. No strong opinion.


> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the virtual DMA channel.
> + *
> + * This queues up a copy operation to be performed by hardware, if the 'flags'
> + * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger hardware to begin
> + * this operation, otherwise do not trigger hardware.

hardware  -> doorbell.

> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   An flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> +               uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> +#endif
> +
> +       return (*dev->copy)(dev, vchan, src, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter list copy operation to be performed by hardware, if
> + * the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger hardware
> + * to begin this operation, otherwise do not trigger hardware.

hardware -> doorbell

> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param sg
> + *   The pointer of scatterlist.
> + * @param flags
> + *   An flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
> + *   - <0: Error code returned by the driver copy scatterlist function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan,
> +                  const struct rte_dmadev_sg *sg,


Sent comment in another thread.


> +                  uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           sg == NULL || sg->src == NULL || sg->dst == NULL ||
> +           sg->nb_src == 0 || sg->nb_dst == 0)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> +#endif
> +
> +       return (*dev->copy_sg)(dev, vchan, sg, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the virtual DMA channel.
> + *
> + * This queues up a fill operation to be performed by hardware, if the 'flags'
> + * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger hardware to begin
> + * this operation, otherwise do not trigger hardware.

hardware  -> doorbell.

> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   An flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued fill job.
> + *   - <0: Error code returned by the driver fill function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +               rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +#endif
> +
> +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations.
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill().
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *
> + * @return
> + *   - =0: Successfully trigger hardware.
> + *   - <0: Failure to trigger hardware.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
> +#endif
> +
> +       return (*dev->submit)(dev, vchan);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] has_error
> + *   Indicates if there are transfer error.
> + *   If not required, NULL can be passed in.
> + *
> + * @return
> + *   The number of operations that successfully completed. This return value
> + *   must be less than or equal to the value of nb_cpls.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> +                    uint16_t *last_idx, bool *has_error)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +       bool err;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           nb_cpls == 0)
> +               return 0;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
> +#endif
> +
> +       /* Ensure the pointer values are non-null to simplify drivers.
> +        * In most cases these should be compile time evaluated, since this is
> +        * an inline function.
> +        * - If NULL is explicitly passed as parameter, then compiler knows the
> +        *   value is NULL
> +        * - If address of local variable is passed as parameter, then compiler
> +        *   can know it's non-NULL.
> +        */
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +       if (has_error == NULL)
> +               has_error = &err;
> +
> +       *has_error = false;
> +       return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been completed, and the
> + * operations result may succeed or fail.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   Indicates the size of status array.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] status
> + *   The error code of operations that completed.
> + *   @see enum rte_dma_status_code
> + *
> + * @return
> + *   The number of operations that completed. This return value must be less
> + *   than or equal to the value of nb_cpls.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
> +                           const uint16_t nb_cpls, uint16_t *last_idx,
> +                           enum rte_dma_status_code *status)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           nb_cpls == 0 || status == NULL)
> +               return 0;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
> +#endif
> +
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +
> +       return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
> +}
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +

Thanks for v5

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v5] dmadev: introduce DMA device library
  2021-07-16  2:45 ` [dpdk-dev] [PATCH v5] " Chengwen Feng
  2021-07-16 13:20   ` Jerin Jacob
@ 2021-07-16 14:41   ` Bruce Richardson
  1 sibling, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-16 14:41 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Fri, Jul 16, 2021 at 10:45:35AM +0800, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
<snip>
> +
> +static struct rte_dmadev *
> +dmadev_allocate(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +	uint16_t dev_id;
> +
> +	dev = dmadev_find(name);
> +	if (dev != NULL) {
> +		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
> +		return NULL;
> +	}
> +
> +	dev_id = dmadev_find_free_dev();
> +	if (dev_id == RTE_DMADEV_MAX_DEVS) {
> +		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
> +		return NULL;
> +	}
> +
> +	if (dmadev_shared_data_prepare() != 0) {
> +		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +		return NULL;
> +	}
> +
The order of the "shared_data_prepare" and "find_free_dev" calls needs to be
reversed. When calling allocate for the first dmadev driver, you will get a
segfault in the find function, since it accesses the shared data, which has
not been created yet.
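
i.e. the start of dmadev_allocate() should be reordered along these lines:

	if (dmadev_shared_data_prepare() != 0) {
		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
		return NULL;
	}

	dev_id = dmadev_find_free_dev(); /* safe: shared data now exists */
	if (dev_id == RTE_DMADEV_MAX_DEVS) {
		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
		return NULL;
	}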

Regards,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v6] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (8 preceding siblings ...)
  2021-07-16  2:45 ` [dpdk-dev] [PATCH v5] " Chengwen Feng
@ 2021-07-19  3:29 ` Chengwen Feng
  2021-07-19  6:21   ` Jerin Jacob
  2021-07-19 13:05 ` [dpdk-dev] [PATCH v7] " Chengwen Feng
                   ` (19 subsequent siblings)
  29 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-07-19  3:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces 'dmadevice', which is a generic type of DMA
device.

The APIs of the dmadev library expose some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
v6:
* delete fence capability.
* delete vchan_release ops.
* copy_sg directly uses src/dst/nb_src/nb_dst as parameters.
* define rte_dma_direction; don't support multiple directions in the
  same vchan.
* fix segfault during device allocation.
* fix typos.
* fix comment formatting.
v5:
* add doxy-api-* file modify.
* use RTE_LOG_REGISTER_DEFAULT.
* fix typo.
* resolve some incorrect comments.
* fix some doxygen problems.
* fix version.map still holding rte_dmadev_completed_fails.
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete field 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold a 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all structs are
  prefixed with rte_dmadev.
* put the comment afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrange the rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
---
 MAINTAINERS                  |    4 +
 config/rte_config.h          |    3 +
 doc/api/doxy-api-index.md    |    1 +
 doc/api/doxy-api.conf.in     |    1 +
 lib/dmadev/meson.build       |    7 +
 lib/dmadev/rte_dmadev.c      |  520 +++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 1025 ++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  182 ++++++++
 lib/dmadev/rte_dmadev_pmd.h  |   72 +++
 lib/dmadev/version.map       |   36 ++
 lib/meson.build              |    1 +
 11 files changed, 1852 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..109ec1f 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..eaec13f
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
+			RTE_ASSERT(rte_dmadevices[i].state ==
+				   RTE_DMADEV_UNUSED);
+			return i;
+		}
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	RTE_ASSERT(dev->data->dev_id == i);
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_bak;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_bak = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_bak;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction >= RTE_DMA_DIR_BUTT) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMA_DEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..ecac281
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1025 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, each virtual DMA
+ * channel represents a different transfer context. The DMA operation request
+ * must be submitted to the virtual DMA channel. e.g. Application could create
+ * virtual DMA channel 0 for memory-to-memory transfer scenario, and create
+ * virtual DMA channel 1 for memory-to-device transfer scenario.
+ *
+ * The dmadevs are dynamically allocated by rte_dmadev_pmd_allocate() during
+ * the PCI/SoC device probing phase performed at EAL initialization time, and
+ * can be released by rte_dmadev_pmd_release() during the PCI/SoC device
+ * removal phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit the operation request to the virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively, the
+ * RTE_DMA_OP_FLAG_SUBMIT flag can be passed to the first three APIs to achieve
+ * the same effect.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMA_DEV_CAPA_SILENT),
+ * the application does not need to invoke the above two completed APIs.
+ *
+ * About the ring_idx which enqueue APIs (e.g. rte_dmadev_copy()
+ * rte_dmadev_fill()) returned, the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) is defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMA_DEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which assume they are not invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
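+
+/*
+ * A minimal usage sketch, for illustration only. It assumes device 0 exists,
+ * that 128 descriptors lie within the device's [min_desc, max_desc] range,
+ * and that 'src', 'dst' and 'len' describe a valid IOVA buffer; error
+ * checking is omitted for brevity.
+ *
+ *	struct rte_dmadev_conf dev_conf = { .max_vchans = 1 };
+ *	struct rte_dmadev_vchan_conf vconf = {
+ *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *		.nb_desc = 128,
+ *	};
+ *	uint16_t dev_id = 0;
+ *	uint16_t last_idx;
+ *	bool error;
+ *	int vchan;
+ *
+ *	rte_dmadev_configure(dev_id, &dev_conf);
+ *	vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
+ *	rte_dmadev_start(dev_id);
+ *	rte_dmadev_copy(dev_id, vchan, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);
+ *	while (rte_dmadev_completed(dev_id, vchan, 1, &last_idx, &error) == 0)
+ *		;
+ */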
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the device id is valid.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMA_DEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMA_DEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA which could use VA as DMA address.
+ * If the device supports SVA, the application could pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc() or the stack.
+ * If the device doesn't support SVA, the application should pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * The ops capabilities start at bit index 32, leaving a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMA_DEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	uint8_t silent_mode;
+	/**< Indicates whether to work in silent mode.
+	 * 0-default mode, 1-silent mode.
+	 *
+	 * @see RTE_DMA_DEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM = 0,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV = 1,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs
+	 * through the PCIE interface. In this case, the ARM SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from memory
+	 * (which is ARM memory) to device (which is x86 host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM = 2,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs
+	 * through the PCIE interface. In this case, the ARM SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is x86 host memory) to memory (which is ARM memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV = 3,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs
+	 * through the PCIE interface. In this case, the ARM SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is x86 host memory) to device (which is another x86 host
+	 * memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_BUTT
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE = 0,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIE. */
+	RTE_DMADEV_PORT_BUTT
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIE access port parameter.
+		 *
+		 * The following model shows how the SoC's PCIE module connects
+		 * to multiple PCIE hosts and multiple endpoints. The PCIE
+		 * module has an integrated DMA controller.
+		 * If the DMA controller wants to access the memory of host A,
+		 * the access can be initiated by PF1 in core0, or by VF0 of
+		 * PF0 in core0.
+		 *
+		 * System Bus
+		 *    |     ----------PCIE module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIE Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIE Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIE Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * The following structure is used to describe the above access
+		 * port.
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIE core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
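+
+/* Illustrative sketch: configuring a device-to-memory virtual DMA channel
+ * whose device access port is PCIE. The PF/VF numbers are hypothetical and
+ * platform dependent, and 128 is assumed to lie within [min_desc, max_desc]:
+ *
+ *	struct rte_dmadev_vchan_conf conf = {
+ *		.direction = RTE_DMA_DIR_DEV_TO_MEM,
+ *		.nb_desc = 128,
+ *		.src_port = {
+ *			.port_type = RTE_DMADEV_PORT_PCIE,
+ *			.pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *		},
+ *	};
+ *	int vchan = rte_dmadev_vchan_setup(dev_id, &conf);
+ */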
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation success, it is the virtual DMA channel id. This value
+ *          must be less than the field 'max_vchans' of struct rte_dmadev_conf
+ *          which was configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which completed successfully. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL = 0,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * this job); it allows outstanding requests to complete as much as
+	 * possible, and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete because it was not attempted:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it's possible for jobs from later batches to be
+	 * completed, though, so the status of the not attempted jobs is
+	 * reported before that of the newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source address. */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISON,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device/
+	 * device-to-memory/device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+#include "rte_dmadev_core.h"
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in-order (i.e. it has a default fence between
+ * operations), this flag could be a NOP.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must issue a doorbell to the hardware
+ * after the jobs are enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write DMA data to the low-level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so the driver should not return an error if this
+ * flag is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation, otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware.
+ * If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
+ * triggered to begin this operation, otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer to the source scatter entry array.
+ * @param dst
+ *   The pointer to the destination scatter entry array.
+ * @param nb_src
+ *   The number of source scatter entries.
+ * @param nb_dst
+ *   The number of destination scatter entries.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
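+
+/* Illustrative sketch of a scatter list copy: two 64-byte source segments
+ * gathered into one 128-byte destination buffer. 'src_iova0', 'src_iova1'
+ * and 'dst_iova' are assumed to be valid IOVA addresses:
+ *
+ *	struct rte_dmadev_sge src[2] = {
+ *		{ .addr = src_iova0, .length = 64 },
+ *		{ .addr = src_iova1, .length = 64 },
+ *	};
+ *	struct rte_dmadev_sge dst[1] = {
+ *		{ .addr = dst_iova, .length = 128 },
+ *	};
+ *	ret = rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1,
+ *				 RTE_DMA_OP_FLAG_SUBMIT);
+ */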
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation, otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
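+
+/* Illustrative sketch of batching: enqueue several copies without the
+ * RTE_DMA_OP_FLAG_SUBMIT flag, then ring the doorbell once. 'srcs', 'dsts',
+ * 'len' and 'nb_jobs' are hypothetical; error handling is omitted:
+ *
+ *	for (i = 0; i < nb_jobs; i++)
+ *		rte_dmadev_copy(dev_id, vchan, srcs[i], dsts[i], len, 0);
+ *	rte_dmadev_submit(dev_id, vchan);
+ */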
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
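+
+/* Illustrative sketch of a completion poll ('burst', 'meta_ring', 'process'
+ * and 'handle_errors' are hypothetical application-defined names):
+ *
+ *	uint16_t last_idx, n, i;
+ *	bool error;
+ *
+ *	n = rte_dmadev_completed(dev_id, vchan, burst, &last_idx, &error);
+ *	for (i = 0; i < n; i++)
+ *		process(meta_ring[(uint16_t)(last_idx - n + 1 + i)]);
+ *	if (error)
+ *		handle_errors();
+ *
+ * Since ring_idx wraps at UINT16_MAX, the unsigned 16-bit arithmetic above
+ * recovers the indices of the 'n' jobs ending at 'last_idx'.
+ */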
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed, where each
+ * operation may have succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of the status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   An array to hold the status codes of the completed operations.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
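+
+/* Illustrative sketch of retrieving per-job status codes ('BURST' and
+ * 'handle_failed_job' are hypothetical application-defined names):
+ *
+ *	enum rte_dma_status_code status[BURST];
+ *	uint16_t last_idx, n, i;
+ *
+ *	n = rte_dmadev_completed_status(dev_id, vchan, BURST, &last_idx,
+ *					status);
+ *	for (i = 0; i < n; i++)
+ *		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *			handle_failed_job(i, status[i]);
+ */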
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..0122f67
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
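+
+/* Illustrative sketch of how a PMD might populate this table (all 'my_*'
+ * callbacks are hypothetical driver functions):
+ *
+ *	static const struct rte_dmadev_ops my_dmadev_ops = {
+ *		.dev_info_get = my_info_get,
+ *		.dev_configure = my_configure,
+ *		.dev_start = my_start,
+ *		.dev_stop = my_stop,
+ *		.dev_close = my_close,
+ *		.vchan_setup = my_vchan_setup,
+ *	};
+ */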
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from primary process, it is used by the secondary process to get
+	 * dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing code should
+	 * initialize this field, and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data' which is pointed to by the 'data'
+	 * field.
+	 *
+	 * - In a secondary process, the dmadev framework will initialize this
+	 * field by copying from the 'dev_private' field of 'struct
+	 * rte_dmadev_data' which was initialized by the primary process.
+	 *
+	 * @note It's the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
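+
+/*
+ * Illustrative sketch of a probe routine using this helper ('my_dmadev_ops',
+ * 'priv' and 'rte_dev' are hypothetical). Per the framework contract, the
+ * primary process copies its private data pointer into the shared data area:
+ *
+ *	struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ *	if (dev == NULL)
+ *		return -ENOMEM;
+ *	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ *		dev->dev_private = priv;
+ *		dev->data->dev_private = priv;
+ *	}
+ *	dev->dev_ops = &my_dmadev_ops;
+ *	dev->device = rte_dev;
+ */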
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..4bb1177
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,36 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+	rte_dmadevices;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v6] dmadev: introduce DMA device library
  2021-07-19  3:29 ` [dpdk-dev] [PATCH v6] " Chengwen Feng
@ 2021-07-19  6:21   ` Jerin Jacob
  2021-07-19 13:20     ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-19  6:21 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Mon, Jul 19, 2021 at 9:02 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
>
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>


The API specification aspects look pretty good to me.

Some minor comments are below. You can add my Acked by on future version
API header file where you will split the patch.


> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..ecac281
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,1025 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + * Copyright(c) 2021 Marvell International Ltd.
> + * Copyright(c) 2021 SmartShare Systems.
> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The DMA framework is built on the following model:
> + *
> + *     ---------------   ---------------       ---------------
> + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> + *     | channel     |   | channel     |       | channel     |
> + *     ---------------   ---------------       ---------------
> + *            |                |                      |
> + *            ------------------                      |
> + *                     |                              |
> + *               ------------                    ------------
> + *               |  dmadev  |                    |  dmadev  |
> + *               ------------                    ------------
> + *                     |                              |
> + *            ------------------               ------------------
> + *            | HW-DMA-channel |               | HW-DMA-channel |
> + *            ------------------               ------------------
> + *                     |                              |
> + *                     --------------------------------
> + *                                     |
> + *                           ---------------------
> + *                           | HW-DMA-Controller |
> + *                           ---------------------
> + *
> + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
> + * each HW-DMA-channel should be represented by a dmadev.
> + *
> + * The dmadev could create multiple virtual DMA channels, each virtual DMA
> + * channel represents a different transfer context. The DMA operation request
> + * must be submitted to the virtual DMA channel. e.g. Application could create
> + * virtual DMA channel 0 for memory-to-memory transfer scenario, and create
> + * virtual DMA channel 1 for memory-to-device transfer scenario.
> + *
> + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
> + * PCI/SoC device probing phase performed at EAL initialization time. And could
> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> + * phase.
> + *
> + * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
> + * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> + *
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_vchan_setup()
> + *     - rte_dmadev_start()
> + *
> + * Then, the application can invoke dataplane APIs to process jobs.
> + *
> + * If the application wants to change the configuration (i.e. invoke
> + * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
> + * rte_dmadev_stop() first to stop the device and then do the reconfiguration
> + * before invoking rte_dmadev_start() again. The dataplane APIs should not be
> + * invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the
> + * rte_dmadev_close() function.
> + *
> + * The dataplane APIs include two parts:
> + * The first part is the submission of operation requests:
> + *     - rte_dmadev_copy()
> + *     - rte_dmadev_copy_sg()
> + *     - rte_dmadev_fill()
> + *     - rte_dmadev_submit()
> + *
> + * These APIs could work with different virtual DMA channels which have
> + * different contexts.
> + *
> + * The first three APIs are used to submit the operation request to the virtual
> + * DMA channel; if the submission is successful, a uint16_t ring_idx is
> + * returned, otherwise a negative number is returned.
> + *
> + * The last API is used to issue a doorbell to the hardware; the flags
> + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs can do the
> + * same work.
> + *
> + * The second part is to obtain the result of requests:
> + *     - rte_dmadev_completed()
> + *         - return the number of operation requests completed successfully.
> + *     - rte_dmadev_completed_status()
> + *         - return the number of operation requests completed.
> + *
> + * @note If the dmadev works in silent mode, the application does not invoke the

in silent mode (@see RTE_DMA_DEV_CAPA_SILENT)

> + * above two completed APIs.
> + *
> + * About the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
> + * rte_dmadev_fill()), the rules are as follows:
> + *     - The ring_idx of each virtual DMA channel is independent.
> + *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
> + *       when it reaches UINT16_MAX, it wraps back to zero.
> + *     - This ring_idx can be used by applications to track per-operation
> + *       metadata in an application-defined circular ring.
> + *     - The initial ring_idx of a virtual DMA channel is zero; after the
> + *       device is stopped, the ring_idx is reset to zero.
> + *
> + * One example:
> + *     - step-1: start one dmadev
> + *     - step-2: enqueue a copy operation, the ring_idx returned is 0
> + *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
> + *     - ...
> + *     - step-101: stop the dmadev
> + *     - step-102: start the dmadev
> + *     - step-103: enqueue a copy operation, the ring_idx returned is 0
> + *     - ...
> + *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
> + *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
> + *     - ...
> + *
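
Since ring_idx wraps at UINT16_MAX, an application can shadow it with a
power-of-two ring of its own; a sketch (APP_RING_SZ, struct app_ctx and the
src/dst/len/meta variables are application-defined placeholders, not part of
this API):

    #define APP_RING_SZ 4096        /* power of two dividing 65536 */
    struct app_ctx { void *opaque; } ctx[APP_RING_SZ];

    int idx = rte_dmadev_copy(dev_id, vchan, src, dst, len,
                              RTE_DMA_OP_FLAG_SUBMIT);
    if (idx >= 0)
        ctx[idx & (APP_RING_SZ - 1)].opaque = meta;

    /* later, on completion, the same masking recovers the metadata of
     * the jobs up to and including last_idx.
     */
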
> + * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
> + * rte_dmadev_copy_sg(), rte_dmadev_fill()) defined as rte_iova_t type. The
> + * dmadev supports two types of address: memory address and device address.
> + *
> + * - memory address: the source and destination address of the memory-to-memory
> + * transfer type, or the source address of the memory-to-device transfer type,
> + * or the destination address of the device-to-memory transfer type.
> + * @note If the device supports SVA, the memory address can be any VA address,

If the device supports SVA (@see RTE_DMA_DEV_CAPA_SVA)

> + * otherwise it must be an IOVA address.
> + *
> + * - device address: the source and destination address of the device-to-device
> + * transfer type, or the source address of the device-to-memory transfer type,
> + * or the destination address of the memory-to-device transfer type.
> + *
> + * By default, all the functions of the dmadev API exported by a PMD are
> + * lock-free functions; they are assumed not to be invoked in parallel on
> + * different logical cores to work on the same target dmadev object.
> + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
> + * parallel invocation because there virtual DMA channels share the same

their?

> + * HW-DMA-channel.
> + *
> + */
> +
> +#include <rte_common.h>
> +#include <rte_compat.h>
> +#include <rte_dev.h>
> +#include <rte_errno.h>
> +#include <rte_memory.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#define RTE_DMADEV_NAME_MAX_LEN        RTE_DEV_NAME_MAX_LEN
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * @param dev_id
> + *   DMA device index.
> + *
> + * @return
> + *   - If the device index is valid (true) or not (false).
> + */
> +__rte_experimental
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the total number of DMA devices that have been successfully
> + * initialised.
> + *
> + * @return
> + *   The total number of usable DMA devices.
> + */
> +__rte_experimental
> +uint16_t
> +rte_dmadev_count(void);
> +
> +/* Enumerates DMA device capabilities. */
> +#define RTE_DMA_DEV_CAPA_MEM_TO_MEM    (1ull << 0)
> +/**< DMA device supports memory-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +#define RTE_DMA_DEV_CAPA_MEM_TO_DEV    (1ull << 1)
> +/**< DMA device supports memory-to-device transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +
> +#define RTE_DMA_DEV_CAPA_DEV_TO_MEM    (1ull << 2)
> +/**< DMA device supports device-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +
> +#define RTE_DMA_DEV_CAPA_DEV_TO_DEV    (1ull << 3)
> +/**< DMA device supports device-to-device transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +
> +#define RTE_DMA_DEV_CAPA_SVA           (1ull << 4)
> +/**< DMA device supports SVA, which allows a VA to be used as the DMA address.
> + * If the device supports SVA, the application can pass any VA address, e.g.
> + * memory from rte_malloc(), rte_memzone(), malloc() or the stack.
> + * If the device does not support SVA, the application must pass an IOVA
> + * address obtained from rte_malloc() or rte_memzone().
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +#define RTE_DMA_DEV_CAPA_SILENT                (1ull << 5)
> +/**< DMA device supports working in silent mode.
> + * In this mode, the application is not required to invoke the
> + * rte_dmadev_completed*() APIs.
> + *
> + * @see struct rte_dmadev_conf::silent_mode
> + */
> +
> +#define RTE_DMA_DEV_CAPA_OPS_COPY      (1ull << 32)
> +/**< DMA device supports copy ops.
> + * The ops capabilities start at bit index 32, so as to leave a gap between
> + * the normal capabilities and the ops capabilities.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +#define RTE_DMA_DEV_CAPA_OPS_COPY_SG   (1ull << 33)
> +/**< DMA device supports scatter-list copy ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +#define RTE_DMA_DEV_CAPA_OPS_FILL      (1ull << 34)
> +/**< DMA device supports fill ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +/**
> + * A structure used to retrieve the information of a DMA device.
> + */
> +struct rte_dmadev_info {
> +       struct rte_device *device; /**< Generic Device information. */
> +       uint64_t dev_capa; /**< Device capabilities (RTE_DMA_DEV_CAPA_*). */
> +       uint16_t max_vchans;
> +       /**< Maximum number of virtual DMA channels supported. */
> +       uint16_t max_desc;
> +       /**< Maximum allowed number of virtual DMA channel descriptors. */
> +       uint16_t min_desc;
> +       /**< Minimum allowed number of virtual DMA channel descriptors. */
> +       uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve information of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param[out] dev_info
> + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> + *   information of the device.
> + *
> + * @return
> + *   - =0: Success, driver updates the information of the DMA device.
> + *   - <0: Error code returned by the driver info get function.
> + *
> + */
> +__rte_experimental
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> +
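
For example, an application that needs mem2mem copies could gate its setup on
the capability bits (a sketch; dev_id is assumed to be a valid device id):

    struct rte_dmadev_info info;

    if (rte_dmadev_info_get(dev_id, &info) != 0)
        return -1;
    if (!(info.dev_capa & RTE_DMA_DEV_CAPA_MEM_TO_MEM) ||
        !(info.dev_capa & RTE_DMA_DEV_CAPA_OPS_COPY))
        return -1; /* device cannot do mem2mem copy */
    /* info.min_desc/info.max_desc bound the later nb_desc choice */
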
> +/**
> + * A structure used to configure a DMA device.
> + */
> +struct rte_dmadev_conf {
> +       uint16_t max_vchans;
> +       /**< Maximum number of virtual DMA channels to use.
> +        * This value cannot be greater than the field 'max_vchans' of struct
> +        * rte_dmadev_info obtained from rte_dmadev_info_get().
> +        */
> +       uint8_t silent_mode;

bool instead of uint8_t?

> +       /**< Indicates whether to work in silent mode.
> +        * 0-default mode, 1-silent mode.
> +        *
> +        * @see RTE_DMA_DEV_CAPA_SILENT
> +        */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Configure a DMA device.
> + *
> + * This function must be invoked first before any other function in the
> + * API. This function can also be re-invoked when a device is in the
> + * stopped state.
> + *
> + * @param dev_id
> + *   The identifier of the device to configure.
> + * @param dev_conf
> + *   The DMA device configuration structure encapsulated into rte_dmadev_conf
> + *   object.
> + *
> + * @return
> + *   - =0: Success, device configured.
> + *   - <0: Error code returned by the driver configuration function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Start a DMA device.
> + *
> + * The device start step is the last one and consists of setting the DMA
> + * to start accepting jobs.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device started.
> + *   - <0: Error code returned by the driver start function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_start(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Stop a DMA device.
> + *
> + * The device can be restarted with a call to rte_dmadev_start().
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device stopped.
> + *   - <0: Error code returned by the driver stop function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stop(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Close a DMA device.
> + *
> + * The device cannot be restarted after this call.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *  - =0: Successfully close device
> + *  - <0: Failure to close device
> + */
> +__rte_experimental
> +int
> +rte_dmadev_close(uint16_t dev_id);
> +
> +/**
> + * rte_dma_direction - DMA transfer direction defines.
> + */
> +enum rte_dma_direction {
> +       RTE_DMA_DIR_MEM_TO_MEM = 0,

No need to give = 0 as it starts with 0.

> +       /**< DMA transfer direction - from memory to memory.
> +        *
> +        * @see struct rte_dmadev_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_MEM_TO_DEV = 1,

No need to give = 1.

> +       /**< DMA transfer direction - from memory to device.
> +        * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs

We can remove ARM. It can be RISC-V too. ;-)


> +        * through the PCIE interface. In this case, the ARM SoCs works in

PCIe

> +        * EP(endpoint) mode, it could initiate a DMA move request from memory
> +        * (which is ARM memory) to device (which is x86 host memory).

to the device.

> +        *
> +        * @see struct rte_dmadev_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_DEV_TO_MEM = 2,
> +       /**< DMA transfer direction - from device to memory.
> +        * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs
> +        * through the PCIE interface. In this case, the ARM SoCs works in
> +        * EP(endpoint) mode, it could initiate a DMA move request from device
> +        * (which is x86 host memory) to memory (which is ARM memory).
> +        *
> +        * @see struct rte_dmadev_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_DEV_TO_DEV = 3,
> +       /**< DMA transfer direction - from device to device.
> +        * In a typical scenario, ARM SoCs are installed on x86 servers as iNICs
> +        * through the PCIE interface. In this case, the ARM SoCs works in
> +        * EP(endpoint) mode, it could initiate a DMA move request from device
> +        * (which is x86 host memory) to device (which is another x86 host
> +        * memory).
> +        *
> +        * @see struct rte_dmadev_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_BUTT

# Doxygen comment is missing.
# Typically we use RTE_DMA_DIR_MAX.
# If there is no real need for this, please remove it as it can break
# ABI if we add more items.


> +};
> +
> +/**
> + * enum rte_dmadev_port_type - DMA access port type defines.
> + *
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +enum rte_dmadev_port_type {
> +       RTE_DMADEV_PORT_NONE = 0,

No need for = 0

> +       RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIE. */
> +       RTE_DMADEV_PORT_BUTT


Same as the above comment for RTE_DMA_DIR_BUTT

> +};
> +
> +/**
> + * A structure used to describe DMA access port parameters.
> + *
> + * @see struct rte_dmadev_vchan_conf::src_port
> + * @see struct rte_dmadev_vchan_conf::dst_port
> + */
> +struct rte_dmadev_port_param {
> +       enum rte_dmadev_port_type port_type;
> +       /**< The device access port type.
> +        * @see enum rte_dmadev_port_type
> +        */
> +       union {
> +               /** PCIE access port parameter.
> +                *
> +                * The following model shows how the SoC's PCIE module connects
> +                * to multiple PCIE hosts and multiple endpoints. The PCIE
> +                * module has an integrated DMA controller.
> +                * If the DMA wants to access the memory of host A, it can be
> +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> +                *
> +                * System Bus
> +                *    |     ----------PCIE module----------
> +                *    |     Bus
> +                *    |     Interface
> +                *    |     -----        ------------------
> +                *    |     |   |        | PCIE Core0     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
> +                *    |     |   |--------|        |- VF-1 |--------| Root    |
> +                *    |     |   |        |   PF-1         |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |   |        | PCIE Core1     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
> +                *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
> +                *    |     |   |        |        |- VF-1 |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |DMA|        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |--------| PCIE Core2     |        ------
> +                *    |     |   |        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |        |                |        ------
> +                *    |     -----        ------------------


This diagram does not show correctly in doxygen. Please fix it.

> +                *
> +                * The following structure is used to describe the above access
> +                * port.
> +                *
> +                * @note If some fields cannot be supported by the
> +                * hardware/driver, then the driver ignores those fields.
> +                * Please check the driver-specific documentation for
> +                * limitations and capabilities.
> +                */
> +               struct {
> +                       uint64_t coreid : 4; /**< PCIE core id used. */
> +                       uint64_t pfid : 8; /**< PF id used. */
> +                       uint64_t vfen : 1; /**< VF enable bit. */
> +                       uint64_t vfid : 16; /**< VF id used. */
> +                       uint64_t pasid : 20;
> +                       /**< The pasid field in TLP packet. */
> +                       uint64_t attr : 3;
> +                       /**< The attributes field in TLP packet. */
> +                       uint64_t ph : 2;
> +                       /**< The processing hint field in TLP packet. */
> +                       uint64_t st : 16;
> +                       /**< The steering tag field in TLP packet. */
> +               } pcie;
> +       };
> +       uint64_t reserved[2]; /**< Reserved for future fields. */
> +};
> +
> +/**
> + * A structure used to configure a virtual DMA channel.
> + */
> +struct rte_dmadev_vchan_conf {
> +       enum rte_dma_direction direction;
> +       /**< Transfer direction
> +        * @see enum rte_dma_direction
> +        */
> +       uint16_t nb_desc;
> +       /**< Number of descriptors for the virtual DMA channel */
> +       struct rte_dmadev_port_param src_port;
> +       /**< 1) Used to describe the device access port parameter in the
> +        * device-to-memory transfer scenario.
> +        * 2) Used to describe the source device access port parameter in the
> +        * device-to-device transfer scenario.
> +        * @see struct rte_dmadev_port_param
> +        */
> +       struct rte_dmadev_port_param dst_port;
> +       /**< 1) Used to describe the device access port parameter in the
> +        * memory-to-device transfer scenario.
> +        * 2) Used to describe the destination device access port parameter in
> +        * the device-to-device transfer scenario.
> +        * @see struct rte_dmadev_port_param
> +        */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate and set up a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param conf
> + *   The virtual DMA channel configuration structure encapsulated into
> + *   rte_dmadev_vchan_conf object.
> + *
> + * @return
> + *   - >=0: Allocation successful; the value is the virtual DMA channel id.
> + *          This value must be less than the field 'max_vchans' of struct
> + *          rte_dmadev_conf configured by rte_dmadev_configure().
> + *   - <0: Error code returned by the driver virtual channel setup function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +                      const struct rte_dmadev_vchan_conf *conf);
> +
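
For the device-to-memory case, src_port carries the PCIe access port
description; a sketch (the coreid/pfid values here are made up and depend on
the actual PCIe topology of the target hardware):

    struct rte_dmadev_vchan_conf vconf = {
        .direction = RTE_DMA_DIR_DEV_TO_MEM,
        .nb_desc = 512,
        .src_port = {
            .port_type = RTE_DMADEV_PORT_PCIE,
            .pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
        },
    };
    int vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
    if (vchan < 0)
        return vchan;
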
> +/**
> + * rte_dmadev_stats - running statistics.
> + */
> +struct rte_dmadev_stats {
> +       uint64_t submitted_count;
> +       /**< Count of operations which were submitted to hardware. */
> +       uint64_t completed_fail_count;
> +       /**< Count of operations which failed to complete. */
> +       uint64_t completed_count;
> +       /**< Count of operations which successfully completed. */
> +};
> +
> +#define RTE_DMADEV_ALL_VCHAN   0xFFFFu
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve basic statistics of one or all virtual DMA channel(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
> + * @param[out] stats
> + *   The basic statistics structure encapsulated into rte_dmadev_stats
> + *   object.
> + *
> + * @return
> + *   - =0: Successfully retrieve stats.
> + *   - <0: Failure to retrieve stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
> +                    struct rte_dmadev_stats *stats);
> +
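
A periodic stats poll over all channels could look like this (a sketch; it
assumes <stdio.h> and <inttypes.h> are included):

    struct rte_dmadev_stats stats;

    if (rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats) == 0)
        printf("submitted %" PRIu64 ", completed %" PRIu64
               ", failed %" PRIu64 "\n",
               stats.submitted_count, stats.completed_count,
               stats.completed_fail_count);
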
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset basic statistics of one or all virtual DMA channel(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
> + *
> + * @return
> + *   - =0: Successfully reset stats.
> + *   - <0: Failure to reset stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Dump DMA device info.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param f
> + *   The file to write the output to.
> + *
> + * @return
> + *   0 on success. Non-zero otherwise.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger the dmadev self test.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - 0: Selftest successful.
> + *   - -ENOTSUP if the device doesn't support selftest
> + *   - other values < 0 on failure.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_selftest(uint16_t dev_id);
> +
> +/**
> + * rte_dma_status_code - DMA transfer result status code defines.
> + */
> +enum rte_dma_status_code {
> +       RTE_DMA_STATUS_SUCCESSFUL = 0,

No need for = 0

> +       /**< The operation completed successfully. */
> +       RTE_DMA_STATUS_USER_ABORT,
> +       /**< The operation failed to complete due to an abort by the user.
> +        * This is mainly used when processing dev_stop; the user could modify
> +        * the descriptors (e.g. change one bit to tell hardware to abort this
> +        * job), which allows outstanding requests to be completed as much as
> +        * possible and so reduces the time needed to stop the device.
> +        */
> +       RTE_DMA_STATUS_NOT_ATTEMPTED,
> +       /**< The operation failed to complete due to the following scenario:
> +        * the jobs in a particular batch are not attempted because they
> +        * appeared after a fence where a previous job failed. In some HW
> +        * implementations it's possible for jobs from later batches to be
> +        * completed, though, so report the status of the not-attempted jobs
> +        * before reporting those newer completed jobs.
> +        */
> +       RTE_DMA_STATUS_INVALID_SRC_ADDR,
> +       /**< The operation failed to complete due to an invalid source
> +        * address.
> +        */
> +       RTE_DMA_STATUS_INVALID_DST_ADDR,
> +       /**< The operation failed to complete due to an invalid destination
> +        * address.
> +        */
> +       RTE_DMA_STATUS_INVALID_LENGTH,
> +       /**< The operation failed to complete due to an invalid length. */
> +       RTE_DMA_STATUS_INVALID_OPCODE,
> +       /**< The operation failed to complete due to an invalid opcode.
> +        * The DMA descriptor could have multiple formats, which are
> +        * distinguished by the opcode field.
> +        */
> +       RTE_DMA_STATUS_BUS_ERROR,
> +       /**< The operation failed to complete due to a bus error. */
> +       RTE_DMA_STATUS_DATA_POISION,
> +       /**< The operation failed to complete due to data poison. */
> +       RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
> +       /**< The operation failed to complete due to a descriptor read error. */
> +       RTE_DMA_STATUS_DEV_LINK_ERROR,
> +       /**< The operation failed to complete due to a device link error.
> +        * Used to indicate a link error in the memory-to-device/
> +        * device-to-memory/device-to-device transfer scenario.
> +        */
> +       RTE_DMA_STATUS_UNKNOWN = 0x100,
> +       /**< The operation failed to complete due to an unknown reason.
> +        * The initial value is 256, which reserves space for future errors.
> +        */
> +};
> +
> +/**
> + * rte_dmadev_sge - holds a scatter DMA operation request entry.
> + */
> +struct rte_dmadev_sge {
> +       rte_iova_t addr; /**< The DMA operation address. */
> +       uint32_t length; /**< The DMA operation length. */
> +};
> +
> +#include "rte_dmadev_core.h"
> +
> +/* DMA flags to augment operation preparation. */
> +#define RTE_DMA_OP_FLAG_FENCE  (1ull << 0)
> +/**< DMA fence flag.
> + * It means the operation with this flag must be processed only after all
> + * previous operations are completed.
> + * If the specified DMA HW works in-order (i.e. it has a default fence between
> + * operations), this flag could be a NOP.
> + *
> + * @see rte_dmadev_copy()
> + * @see rte_dmadev_copy_sg()
> + * @see rte_dmadev_fill()
> + */
> +
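
As an illustration, if job B must only start after job A has fully completed
(e.g. B reads what A wrote), B carries the fence (srcA/dstA/srcB/dstB and the
lengths are placeholders):

    rte_dmadev_copy(dev_id, vchan, srcA, dstA, lenA, 0);
    rte_dmadev_copy(dev_id, vchan, srcB, dstB, lenB,
                    RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);
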
> +#define RTE_DMA_OP_FLAG_SUBMIT (1ull << 1)
> +/**< DMA submit flag.
> + * It means the operation with this flag must issue a doorbell to the hardware
> + * after the job is enqueued.
> + */
> +
> +#define RTE_DMA_OP_FLAG_LLC    (1ull << 2)
> +/**< Hint for the DMA to write data to the low-level cache.
> + * Used for performance optimization; this is just a hint, and there is no
> + * capability bit for it, so the driver should not return an error if this
> + * flag is set.
> + */
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the virtual DMA channel.
> + *
> + * This queues up a copy operation to be performed by hardware. If the 'flags'
> + * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
> + * begin this operation; otherwise the doorbell is not triggered.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   The flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> +               uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> +#endif
> +
> +       return (*dev->copy)(dev, vchan, src, dst, length, flags);
> +}
> +
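
A typical batched use defers the doorbell and rings it once per burst (a
sketch; burst, srcs, dsts and len are application values):

    uint16_t i;
    int ret;

    for (i = 0; i < burst; i++) {
        ret = rte_dmadev_copy(dev_id, vchan, srcs[i], dsts[i], len, 0);
        if (ret < 0)
            break; /* e.g. ring full: submit what was queued so far */
    }
    if (i != 0)
        rte_dmadev_submit(dev_id, vchan); /* one doorbell for the burst */

Alternatively, the last enqueue of the burst can carry RTE_DMA_OP_FLAG_SUBMIT
and save the extra call.
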
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter-list copy operation to be performed by hardware.
> + * If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
> + * triggered to begin this operation; otherwise the doorbell is not triggered.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   The pointer of source scatter entry array.
> + * @param dst
> + *   The pointer of destination scatter entry array.
> + * @param nb_src
> + *   The number of source scatter entries.
> + * @param nb_dst
> + *   The number of destination scatter entries.
> + * @param flags
> + *   The flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
> + *   - <0: Error code returned by the driver copy scatterlist function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
> +                  struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
> +                  uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> +#endif
> +
> +       return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
> +}
> +
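
As an illustration, gathering two source segments into one destination buffer
(a sketch; the iova values are placeholders):

    struct rte_dmadev_sge src[2] = {
        { .addr = src_iova0, .length = 256 },
        { .addr = src_iova1, .length = 256 },
    };
    struct rte_dmadev_sge dst[1] = {
        { .addr = dst_iova, .length = 512 },
    };
    int ret = rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1,
                                 RTE_DMA_OP_FLAG_SUBMIT);
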
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the virtual DMA channel.
> + *
> + * This queues up a fill operation to be performed by hardware. If the 'flags'
> + * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
> + * begin this operation; otherwise the doorbell is not triggered.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   The flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued fill job.
> + *   - <0: Error code returned by the driver fill function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +               rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +#endif
> +
> +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations.
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill().
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *
> + * @return
> + *   - =0: Successfully trigger hardware.
> + *   - <0: Failure to trigger hardware.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
> +#endif
> +
> +       return (*dev->submit)(dev, vchan);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] has_error
> + *   Indicates if there is a transfer error.
> + *   If not required, NULL can be passed in.
> + *
> + * @return
> + *   The number of operations that successfully completed. This return value
> + *   must be less than or equal to the value of nb_cpls.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> +                    uint16_t *last_idx, bool *has_error)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +       bool err;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           nb_cpls == 0)
> +               return 0;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
> +#endif
> +
> +       /* Ensure the pointer values are non-null to simplify drivers.
> +        * In most cases these should be compile time evaluated, since this is
> +        * an inline function.
> +        * - If NULL is explicitly passed as parameter, then compiler knows the
> +        *   value is NULL
> +        * - If address of local variable is passed as parameter, then compiler
> +        *   can know it's non-NULL.
> +        */
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +       if (has_error == NULL)
> +               has_error = &err;
> +
> +       *has_error = false;
> +       return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been completed; each
> + * operation's result may be a success or a failure.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   Indicates the size of status array.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] status
> + *   The error code of operations that completed.
> + *   @see enum rte_dma_status_code
> + *
> + * @return
> + *   The number of operations that completed. This return value must be less
> + *   than or equal to the value of nb_cpls.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
> +                           const uint16_t nb_cpls, uint16_t *last_idx,
> +                           enum rte_dma_status_code *status)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           nb_cpls == 0 || status == NULL)
> +               return 0;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
> +#endif
> +
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +
> +       return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
> +}
> +
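
Putting the two completion APIs together: poll the fast path first and fall
back to the status variant when an error is flagged (a sketch; BURST is an
application-chosen constant):

    uint16_t last_idx, n;
    bool has_error = false;

    n = rte_dmadev_completed(dev_id, vchan, BURST, &last_idx, &has_error);
    /* n requests up to and including last_idx completed successfully */
    if (has_error) {
        enum rte_dma_status_code status[BURST];

        n = rte_dmadev_completed_status(dev_id, vchan, BURST,
                                        &last_idx, status);
        /* status[0..n-1] holds the per-request result codes */
    }
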
> +#ifdef __cplusplus
> +}
> +#endif
> +


* [dpdk-dev] [PATCH v7] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (9 preceding siblings ...)
  2021-07-19  3:29 ` [dpdk-dev] [PATCH v6] " Chengwen Feng
@ 2021-07-19 13:05 ` Chengwen Feng
  2021-07-20  1:14 ` [dpdk-dev] [PATCH v8] " Chengwen Feng
                   ` (18 subsequent siblings)
  29 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-19 13:05 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces 'dmadevice', which is a generic type of DMA
device.

The APIs of dmadev library exposes some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
v7:
* add rte_dmadev_get_dev_id API.
* fix typo.
* use the default macro assignment scheme.
* rename RTE_DMA_DEV_CAPA_* to RTE_DMADEV_CAPA_*.
* rename rte_dmadev_conf.silent_mode to enable_silent.
* add memset when get stats.
v6:
* delete fence capability.
* delete vchan_release ops.
* copy_sg: directly use src/dst/nb_src/nb_dst as parameters.
* define rte_dma_direction, don't support multiple direction in the
  same vchan.
* fix segmentation fault in allocation.
* fix typo.
* fix comment formatting.
v5:
* add doxy-api-* file modify.
* use RTE_LOG_REGISTER_DEFAULT.
* fix typo.
* resolve some incorrect comments.
* fix some doxygen problems.
* fix version.map still holding rte_dmadev_completed_fails.
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete field 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold a 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPTED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all struct
  prefix with rte_dmadev.
* put the comment afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrange the rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
---
 MAINTAINERS                  |    4 +
 config/rte_config.h          |    3 +
 doc/api/doxy-api-index.md    |    1 +
 doc/api/doxy-api.conf.in     |    1 +
 lib/dmadev/meson.build       |    7 +
 lib/dmadev/rte_dmadev.c      |  545 ++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 1041 ++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  182 ++++++++
 lib/dmadev/rte_dmadev_pmd.h  |   72 +++
 lib/dmadev/version.map       |   37 ++
 lib/meson.build              |    1 +
 11 files changed, 1894 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..109ec1f 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..a6d8e3be
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,545 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
+			RTE_ASSERT(rte_dmadevices[i].state ==
+				   RTE_DMADEV_UNUSED);
+			return i;
+		}
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	RTE_ASSERT(dev->data->dev_id == i);
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_bak;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_bak = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_bak;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	dev = &rte_dmadevices[dev_id];
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..1518187
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1041 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+ * channel represents a different transfer context. The DMA operation request
+ * must be submitted to the virtual DMA channel. e.g. an application could
+ * create virtual DMA channel 0 for the memory-to-memory transfer scenario and
+ * virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * The dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit the operation request to the
+ * virtual DMA channel; if the submission is successful, a uint16_t ring_idx
+ * is returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; the flags
+ * parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can do the
+ * same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not need to invoke the above two completed APIs.
+ *
+ * About the ring_idx which the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()) return, the rules are as follows:
+ *     - ring_idx values for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
+ * The DMA operation addresses used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) are defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address; otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions, which assume they are not invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA which could use VA as DMA address.
+ * If the device supports SVA then the application could pass any VA address
+ * like memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
+ * If the device doesn't support SVA, then the application should pass an IOVA
+ * address which comes from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::enable_silent
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, so as to leave a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info which is obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false: default mode, true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it could initiate a DMA move request from memory
+	 * (which is the SoC's memory) to the device (which is host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it could initiate a DMA move request from the
+	 * device (which is host memory) to memory (which is the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it could initiate a DMA move request from a
+	 * device (which is host memory) to another device (which is another
+	 * host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/* The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrate DMA controller.
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 */
+		/** The following structure is used to describe the PCIe access
+		 * port parameters.
+		 *
+		 * @note If some fields can not be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The pasid field in TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation successful; the value is the virtual DMA channel id.
+ *          This value must be less than the field 'max_vchans' of struct
+ *          rte_dmadev_conf which was configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which successfully completed. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job); it allows outstanding requests to be completed as much as
+	 * possible, so reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it's possible that jobs from later batches would be
+	 * completed, though, so report the status from the not attempted jobs
+	 * before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device/
+	 * device-to-memory/device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+#include "rte_dmadev_core.h"
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in-order (meaning it has a default fence
+ * between operations), this flag could be a NOP.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must issue a doorbell to the hardware
+ * after the job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< DMA write data to low level cache hint.
+ * Used for performance optimization. This is just a hint, and there is no
+ * capability bit for it; the driver should not return an error if this flag
+ * was set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware, if the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
+ * this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware, if
+ * the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell
+ * to begin this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter entry array.
+ * @param dst
+ *   The pointer of destination scatter entry array.
+ * @param nb_src
+ *   The number of source scatter entry.
+ * @param nb_dst
+ *   The number of destination scatter entry.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware, if the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
+ * this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed, whether the
+ * operations succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   The error code of operations that completed.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
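
To make the configure -> vchan_setup -> start order and the dataplane flow
described in the header above concrete, a minimal sketch follows; the
descriptor count, the mem-to-mem direction and the busy-wait completion loop
are illustrative assumptions, not requirements of the API:

#include <errno.h>
#include <stdbool.h>
#include <rte_dmadev.h>

static int
dma_copy_once(uint16_t dev_id, rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	struct rte_dmadev_conf conf = { .max_vchans = 1 };
	struct rte_dmadev_vchan_conf vconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 128, /* assumed to be within [min_desc, max_desc] */
	};
	uint16_t last_idx;
	bool has_error;
	int vchan, ret;

	/* Control path, in the documented order. */
	ret = rte_dmadev_configure(dev_id, &conf);
	if (ret < 0)
		return ret;
	vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
	if (vchan < 0)
		return vchan;
	ret = rte_dmadev_start(dev_id);
	if (ret < 0)
		return ret;

	/* Enqueue and ring the doorbell in one call via the SUBMIT flag. */
	ret = rte_dmadev_copy(dev_id, vchan, src, dst, len,
			      RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0)
		return ret;

	/* Busy-poll for the single completion. */
	while (rte_dmadev_completed(dev_id, vchan, 1, &last_idx,
				    &has_error) == 0)
		;

	return has_error ? -EIO : 0;
}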
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..0122f67
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from the primary process; it is used by the secondary process to get
+	 * dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field, and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data' which is pointed to by the 'data'
+	 * field.
+	 *
+	 * - In the secondary process, the dmadev framework will initialize
+	 * this field by copying from the 'dev_private' field of 'struct
+	 * rte_dmadev_data' which was initialized by the primary process.
+	 *
+	 * @note It's the primary process's responsibility to deinitialize this
+	 * field after invoking rte_dmadev_pmd_release() in the PCI/SoC device
+	 * removing stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info which supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
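
A hypothetical skeleton PMD shows how the internal structures above are wired
at probe time; every skel_* name here is an assumption of this sketch:

#include <errno.h>
#include <rte_dmadev_pmd.h>

static int
skel_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *info,
	      uint32_t info_sz)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(info_sz);
	info->dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM | RTE_DMADEV_CAPA_OPS_COPY;
	info->max_vchans = 1;
	return 0;
}

static int
skel_copy(struct rte_dmadev *dev, uint16_t vchan, rte_iova_t src,
	  rte_iova_t dst, uint32_t length, uint64_t flags)
{
	/* A real driver would write a hardware descriptor here. */
	RTE_SET_USED(dev); RTE_SET_USED(vchan); RTE_SET_USED(src);
	RTE_SET_USED(dst); RTE_SET_USED(length); RTE_SET_USED(flags);
	return 0; /* ring_idx of the enqueued job */
}

static int
skel_submit(struct rte_dmadev *dev, uint16_t vchan)
{
	/* A real driver would ring the doorbell here. */
	RTE_SET_USED(dev); RTE_SET_USED(vchan);
	return 0;
}

static const struct rte_dmadev_ops skel_ops = {
	.dev_info_get = skel_info_get,
	/* dev_configure, dev_start, vchan_setup, ... filled in likewise. */
};

static int
skel_probe(const char *name)
{
	struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);

	if (dev == NULL)
		return -ENOMEM;
	dev->dev_ops = &skel_ops;	/* slow path goes through the table */
	dev->copy = skel_copy;		/* fast path uses direct pointers */
	dev->submit = skel_submit;
	return 0;
}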
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
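
And the matching remove path for the driver-facing APIs above, again with
hypothetical skel_* naming:

#include <errno.h>
#include <rte_dmadev_pmd.h>

static int
skel_remove(const char *name)
{
	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);

	if (dev == NULL)
		return -ENODEV;

	/* Releases the dmadev slot; freeing PMD-private data remains the
	 * driver's responsibility.
	 */
	return rte_dmadev_pmd_release(dev);
}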
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..0f2ed4b
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,37 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
	global:
+
+	rte_dmadevices;
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
+
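
Among the exported symbols above is the status-aware completion API; one way
an application might drain completions with per-operation status (the batch
size of 32 and the two counters are assumptions of this sketch):

#include <rte_dmadev.h>

static void
dma_drain_status(uint16_t dev_id, uint16_t vchan, uint64_t *ok, uint64_t *bad)
{
	enum rte_dma_status_code status[32];
	uint16_t last_idx;
	uint16_t n, i;

	n = rte_dmadev_completed_status(dev_id, vchan, 32, &last_idx, status);
	for (i = 0; i < n; i++) {
		if (status[i] == RTE_DMA_STATUS_SUCCESSFUL)
			(*ok)++;
		else
			(*bad)++;
	}
}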
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1
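
The ring_idx contract documented in rte_dmadev.h maps naturally onto an
application-managed context ring; a minimal sketch, assuming mbuf-backed
buffers (the ctx_ring table and track_copy() are illustrative only):

#include <stdint.h>
#include <rte_dmadev.h>
#include <rte_mbuf.h>

/* ring_idx wraps at UINT16_MAX, so a 65536-entry table can map every
 * in-flight index to its metadata.
 */
static struct rte_mbuf *ctx_ring[UINT16_MAX + 1];

static int
track_copy(uint16_t dev_id, uint16_t vchan, struct rte_mbuf *src,
	   struct rte_mbuf *dst)
{
	int idx = rte_dmadev_copy(dev_id, vchan, rte_pktmbuf_iova(src),
				  rte_pktmbuf_iova(dst),
				  rte_pktmbuf_data_len(src),
				  RTE_DMA_OP_FLAG_SUBMIT);

	if (idx < 0)
		return idx;
	/* Reclaim ctx_ring[idx] once rte_dmadev_completed() reports it. */
	ctx_ring[idx] = src;
	return 0;
}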


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v6] dmadev: introduce DMA device library
  2021-07-19  6:21   ` Jerin Jacob
@ 2021-07-19 13:20     ` fengchengwen
  2021-07-19 13:36       ` Jerin Jacob
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-19 13:20 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

Many thanks.

Most of the comments are fixed in v7, and v7 still includes the implementation.

I did a thorough test of the implementation, so could you please review
the implementation also?

One comment is modified as follows:
COMMENT:
>> +                *    |     |   |        ------------------
>> +                *    |     |DMA|        |                |        ------
>> +                *    |     |   |        |                |--------| EP |
>> +                *    |     |   |--------| PCIE Core2     |        ------
>> +                *    |     |   |        |                |        ------
>> +                *    |     |   |        |                |--------| EP |
>> +                *    |     |   |        |                |        ------
>> +                *    |     -----        ------------------
>
>
> This diagram does not show correctly in doxygen. Please fix it.

REPLY: I can't find the right way to show it correctly except by splitting it
into two parts (one with doxygen format, and the other not), so if you have a
good idea, please let me know. Thanks.


On 2021/7/19 14:21, Jerin Jacob wrote:
> On Mon, Jul 19, 2021 at 9:02 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>>
>> This patch introduces 'dmadevice' which is a generic type of DMA
>> device.
>>
>> The APIs of dmadev library exposes some generic operations which can
>> enable configuration and I/O with the DMA devices.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> 
> 
> The API specification aspects look pretty good to me.
> 
> Some minor comments are below. You can add my Acked by on future version
> API header file where you will split the patch.
> 

[snip]


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v6] dmadev: introduce DMA device library
  2021-07-19 13:20     ` fengchengwen
@ 2021-07-19 13:36       ` Jerin Jacob
  0 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-19 13:36 UTC (permalink / raw)
  To: fengchengwen
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Mon, Jul 19, 2021 at 6:50 PM fengchengwen <fengchengwen@huawei.com> wrote:
>
> Many thanks.
>
> Most of the comments are fixed in v7, and v7 still includes the implementation.

Changes good to me.

>
> I did a thorough test of the implementation, so could you please review
> the implementation also?

Will do

>
> One comment is modified as follows:
> COMMENT:
> >> +                *    |     |   |        ------------------
> >> +                *    |     |DMA|        |                |        ------
> >> +                *    |     |   |        |                |--------| EP |
> >> +                *    |     |   |--------| PCIE Core2     |        ------
> >> +                *    |     |   |        |                |        ------
> >> +                *    |     |   |        |                |--------| EP |
> >> +                *    |     |   |        |                |        ------
> >> +                *    |     -----        ------------------
> >
> >
> > This diagram does not show correctly in doxygen. Please fix it.
>
> REPLY: I can't find the right way to show it as correctly except split them to two
> part (one with doxygen format, and the other not), so if you have a good idea,
> please let me know, thanks.

Use   \code{.unparsed}  \endcode

diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 1518187c1e..df2d9f975c 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -463,12 +463,14 @@ struct rte_dmadev_port_param {
         * @see enum rte_dmadev_port_type
         */
        union {
-               /* The following model shows SoC's PCIe module connects to
+               /**
+                * The following model shows SoC's PCIe module connects to
                 * multiple PCIe hosts and multiple endpoints. The PCIe module
                 * has an integrate DMA controller.


integrate -> integrated


                 * If the DMA wants to access the memory of host A, it can be
                 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
                 *
+                * \code{.unparsed}
                 * System Bus
                 *    |     ----------PCIe module----------
                 *    |     Bus
@@ -499,8 +501,9 @@ struct rte_dmadev_port_param {
                 *    |     |   |        |                |--------| EP |
                 *    |     |   |        |                |        ------
                 *    |     -----        ------------------
-                */
-               /** The following structure is used to describe the PCIe access
+                *   \endcode
+                *
+                * The following structure is used to describe the PCIe access
                 * port parameters.
                 *
                 * @note If some fields can not be supported by the


>
> On 2021/7/19 14:21, Jerin Jacob wrote:
> > On Mon, Jul 19, 2021 at 9:02 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
> >>
> >> This patch introduces 'dmadevice' which is a generic type of DMA
> >> device.
> >>
> >> The APIs of dmadev library exposes some generic operations which can
> >> enable configuration and I/O with the DMA devices.
> >>
> >> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> >
> >
> > The API specification aspects look pretty good to me.
> >
> > Some minor comments are below. You can add my Acked by on future version
> > API header file where you will split the patch.
> >
>
> [snip]
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v8] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (10 preceding siblings ...)
  2021-07-19 13:05 ` [dpdk-dev] [PATCH v7] " Chengwen Feng
@ 2021-07-20  1:14 ` Chengwen Feng
  2021-07-20  5:03   ` Jerin Jacob
  2021-07-20 11:12 ` [dpdk-dev] [PATCH v9] " Chengwen Feng
                   ` (17 subsequent siblings)
  29 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-07-20  1:14 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces 'dmadevice' which is a generic type of DMA
device.

The APIs of dmadev library exposes some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
v8:
* fix pcie access port diagram doxygen problem.
* fix typo.
* fix compile warning when enabling DMADEV_DEBUG.
v7:
* add rte_dmadev_get_dev_id API.
* fix typo.
* use the default macro assignment scheme.
* rename RTE_DMA_DEV_CAPA_* to RTE_DMADEV_CAPA_*.
* rename rte_dmadev_conf.silent_mode to enable_silent.
* add memset when get stats.
v6:
* delete fence capability.
* delete vchan_release ops.
* copy_sg directly uses src/dst/nb_src/nb_dst as parameters.
* define rte_dma_direction, don't support multiple directions in the
  same vchan.
* fix segment fault when allocate.
* fix typo.
* fix comments format.
v5:
* add doxy-api-* file modify.
* use RTE_LOG_REGISTER_DEFAULT.
* fix typo.
* resolve some incorrect comments.
* fix some doxygen problems.
* fix version.map still holding rte_dmadev_completed_fails.
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete field 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPTED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all struct
  prefix with rte_dmadev.
* put the comment afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrange rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
---
 MAINTAINERS                  |    4 +
 config/rte_config.h          |    3 +
 doc/api/doxy-api-index.md    |    1 +
 doc/api/doxy-api.conf.in     |    1 +
 lib/dmadev/meson.build       |    7 +
 lib/dmadev/rte_dmadev.c      |  545 ++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 1041 ++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  182 ++++++++
 lib/dmadev/rte_dmadev_pmd.h  |   72 +++
 lib/dmadev/version.map       |   37 ++
 lib/meson.build              |    1 +
 11 files changed, 1894 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..109ec1f 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..a6d8e3be
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,545 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0') {
+			RTE_ASSERT(rte_dmadevices[i].state ==
+				   RTE_DMADEV_UNUSED);
+			return i;
+		}
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev *
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	RTE_ASSERT(dev->data->dev_id == i);
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_bak;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_bak = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_bak;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u doesn't support silent mode\n", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..53d85f1
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1041 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * A DMA controller may have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * A dmadev may create multiple virtual DMA channels, each of which represents
+ * a different transfer context. DMA operation requests must be submitted to a
+ * virtual DMA channel. E.g. an application could create virtual DMA channel 0
+ * for the memory-to-memory transfer scenario, and virtual DMA channel 1 for
+ * the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and may
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs can work with different virtual DMA channels, which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively, the
+ * flags parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT) can do
+ * the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application need not invoke the two completion APIs above.
+ *
+ * About the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows (see the sketch after the
+ * example below):
+ *     - The ring_idx of each virtual DMA channel is independent.
+ *     - For a given virtual DMA channel, the ring_idx is monotonically
+ *       incremented; when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
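+ * A sketch of application-side tracking using ring_idx (hedged: 'meta',
+ * 'my_metadata', 'BURST' and 'META_RING_SZ' are hypothetical application
+ * names; it assumes META_RING_SZ is a power of two and no more than
+ * META_RING_SZ operations are outstanding at once):
+ *
+ * \code{.c}
+ *	// on submission: remember per-operation metadata
+ *	int ret = rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
+ *	if (ret >= 0)
+ *		meta[ret & (META_RING_SZ - 1)] = my_metadata;
+ *
+ *	// on completion: last_idx identifies the newest finished operation
+ *	uint16_t last_idx;
+ *	uint16_t n = rte_dmadev_completed(dev_id, vchan, BURST,
+ *					  &last_idx, NULL);
+ *	// operations (last_idx - n + 1) .. last_idx (mod 2^16) are done
+ * \endcode
+ *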
+ * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) is defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
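+ * A minimal end-to-end usage sketch follows (hedged: it assumes a device with
+ * mem2mem copy capability and a single vchan; 'src', 'dst' and 'len' are
+ * provided by the application, 128 is assumed to lie within
+ * [min_desc, max_desc], and error handling is omitted for brevity):
+ *
+ * \code{.c}
+ *	struct rte_dmadev_conf dev_conf = { .max_vchans = 1 };
+ *	struct rte_dmadev_vchan_conf vchan_conf = {
+ *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *		.nb_desc = 128,
+ *	};
+ *	uint16_t last_idx;
+ *	bool has_error;
+ *
+ *	rte_dmadev_configure(dev_id, &dev_conf);
+ *	int vchan = rte_dmadev_vchan_setup(dev_id, &vchan_conf);
+ *	rte_dmadev_start(dev_id);
+ *
+ *	// enqueue one copy and ring the doorbell in the same call
+ *	rte_dmadev_copy(dev_id, vchan, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);
+ *
+ *	// poll until the operation completes
+ *	while (rte_dmadev_completed(dev_id, vchan, 1, &last_idx,
+ *				    &has_error) == 0)
+ *		;
+ * \endcode
+ *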
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using a VA as the DMA address.
+ * If the device supports SVA, then the application can pass any VA address,
+ * e.g. memory from rte_malloc(), rte_memzone(), malloc() or the stack.
+ * If the device doesn't support SVA, then the application must pass an IOVA
+ * address obtained from e.g. rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::enable_silent
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, so as to leave a gap between the
+ * normal capability bits and the ops capability bits.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false = default mode, true = silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, SoCs are installed on host servers as iNICs
+	 * through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it can initiate a DMA move request from memory
+	 * (i.e. the SoC's memory) to a device (i.e. the host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, SoCs are installed on host servers as iNICs
+	 * through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it can initiate a DMA move request from a device
+	 * (i.e. the host's memory) to memory (i.e. the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, SoCs are installed on host servers as iNICs
+	 * through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it can initiate a DMA move request from one
+	 * device (i.e. one host's memory) to another device (i.e. another
+	 * host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA controller wants to access the memory of host A,
+		 * the access can be initiated via PF-1 of PCIe Core0, or via
+		 * VF-0 of PF-0 of PCIe Core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation succeeded; the value is the virtual DMA channel id.
+ *          This value is always less than the 'max_vchans' field of struct
+ *          rte_dmadev_conf configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which completed successfully. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channel(s).
+ *
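+ * For example (a minimal sketch, aggregating across all virtual DMA
+ * channels of a device):
+ *
+ * \code{.c}
+ *	struct rte_dmadev_stats stats;
+ *	int ret = rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+ * \endcode
+ *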
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * this job); it allows outstanding requests to be completed as far as
+	 * possible, thus reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it's possible for jobs from later batches to be
+	 * completed, though, so the status of these not-attempted jobs is
+	 * reported before that of the newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISON,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device/
+	 * device-to-memory/device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+#include "rte_dmadev_core.h"
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in-order (i.e. it has an implicit fence
+ * between operations), this flag may be a no-op.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must issue a doorbell to the hardware
+ * after the job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write DMA data to the low-level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so drivers should not return an error if this flag
+ * is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware.
+ * If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
+ * triggered to begin this operation; otherwise the doorbell is not triggered.
+ *
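+ * For example (a sketch; 'iova_a', 'iova_b' and 'iova_c' are assumed to be
+ * valid DMA addresses, gathering two 64B segments into one 128B buffer):
+ *
+ * \code{.c}
+ *	struct rte_dmadev_sge src[2] = {
+ *		{ .addr = iova_a, .length = 64 },
+ *		{ .addr = iova_b, .length = 64 },
+ *	};
+ *	struct rte_dmadev_sge dst[1] = { { .addr = iova_c, .length = 128 } };
+ *
+ *	rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1,
+ *			   RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode
+ *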
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   Pointer to the source scatter entry array.
+ * @param dst
+ *   Pointer to the destination scatter entry array.
+ * @param nb_src
+ *   The number of source scatter entries.
+ * @param nb_dst
+ *   The number of destination scatter entries.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
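+ * For example (a sketch of batching: enqueue several jobs without the SUBMIT
+ * flag, then ring the doorbell once; 'burst', 'srcs', 'dsts' and 'len' are
+ * assumed to be provided by the application):
+ *
+ * \code{.c}
+ *	for (i = 0; i < burst; i++)
+ *		rte_dmadev_copy(dev_id, vchan, srcs[i], dsts[i], len, 0);
+ *	rte_dmadev_submit(dev_id, vchan);
+ * \endcode
+ *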
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there are transfer errors.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; the result of
+ * each operation may be a success or a failure.
+ *
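+ * For example (a sketch: drain up to 32 completions and inspect each status
+ * code; 'handle_error' is a hypothetical application handler):
+ *
+ * \code{.c}
+ *	enum rte_dma_status_code status[32];
+ *	uint16_t last_idx, i;
+ *	uint16_t n = rte_dmadev_completed_status(dev_id, vchan, 32,
+ *						 &last_idx, status);
+ *	for (i = 0; i < n; i++)
+ *		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *			handle_error(status[i]);
+ * \endcode
+ *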
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   The status codes of the operations that completed.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..0122f67
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by DMA device
+ * drivers in order to expose their ops to the dmadev class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field of 'struct rte_dmadev'
+	 * in the primary process; it is used by secondary processes to get
+	 * the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD driver mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field, and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data', which is pointed to by the
+	 * 'data' field.
+	 *
+	 * - In a secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of
+	 * 'struct rte_dmadev_data', which was initialized by the primary
+	 * process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info which supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
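+ *
+ * A sketch of a typical probe flow using these APIs (hedged: 'my_dma_probe',
+ * 'my_dma_ops' and the "my_dma0" name are hypothetical driver names, not part
+ * of this API):
+ *
+ * \code{.c}
+ *	static int
+ *	my_dma_probe(struct rte_device *rte_dev)
+ *	{
+ *		struct rte_dmadev *dev = rte_dmadev_pmd_allocate("my_dma0");
+ *		if (dev == NULL)
+ *			return -ENOMEM;
+ *		dev->device = rte_dev;
+ *		dev->dev_ops = &my_dma_ops;
+ *		// in the primary process, also set up dev->dev_private and
+ *		// mirror it into dev->data->dev_private
+ *		return 0;
+ *	}
+ * \endcode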
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..0f2ed4b
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,37 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
	global:
+
+	rte_dmadevices;
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
+
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v8] dmadev: introduce DMA device library
  2021-07-20  1:14 ` [dpdk-dev] [PATCH v8] " Chengwen Feng
@ 2021-07-20  5:03   ` Jerin Jacob
  2021-07-20  6:53     ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-20  5:03 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Tue, Jul 20, 2021 at 6:48 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch introduces 'dmadevice' which is a generic type of DMA
> device.
>
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
> +DMA device API - EXPERIMENTAL
> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/
> +
>
>  Memory Pool Drivers
>  -------------------
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 590903c..331a431 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -81,6 +81,9 @@
>  /* rawdev defines */
>  #define RTE_RAWDEV_MAX_DEVS 64
>
> +/* dmadev defines */
> +#define RTE_DMADEV_MAX_DEVS 64
> +
>  /* ip_fragmentation defines */
>  #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
>  #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
> diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
> index 1992107..ce08250 100644
> --- a/doc/api/doxy-api-index.md
> +++ b/doc/api/doxy-api-index.md
> @@ -27,6 +27,7 @@ The public API headers are grouped by topics:
>    [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
>    [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
>    [rawdev]             (@ref rte_rawdev.h),
> +  [dmadev]             (@ref rte_dmadev.h),
>    [metrics]            (@ref rte_metrics.h),
>    [bitrate]            (@ref rte_bitrate.h),
>    [latency]            (@ref rte_latencystats.h),
> diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
> index 325a019..109ec1f 100644
> --- a/doc/api/doxy-api.conf.in
> +++ b/doc/api/doxy-api.conf.in
> @@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
>                            @TOPDIR@/lib/compressdev \
>                            @TOPDIR@/lib/cryptodev \
>                            @TOPDIR@/lib/distributor \
> +                          @TOPDIR@/lib/dmadev \
>                            @TOPDIR@/lib/efd \
>                            @TOPDIR@/lib/ethdev \
>                            @TOPDIR@/lib/eventdev \
> diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
> new file mode 100644
> index 0000000..d2fc85e
> --- /dev/null
> +++ b/lib/dmadev/meson.build
> @@ -0,0 +1,7 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2021 HiSilicon Limited.
> +
> +sources = files('rte_dmadev.c')
> +headers = files('rte_dmadev.h')
> +indirect_headers += files('rte_dmadev_core.h')
> +driver_sdk_headers += files('rte_dmadev_pmd.h')
> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> new file mode 100644
> index 0000000..a6d8e3be
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -0,0 +1,545 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#include <ctype.h>
> +#include <inttypes.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_eal.h>
> +#include <rte_errno.h>
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +#include <rte_memory.h>
> +#include <rte_memzone.h>
> +#include <rte_malloc.h>
> +#include <rte_string_fns.h>
> +
> +#include "rte_dmadev.h"
> +#include "rte_dmadev_pmd.h"
> +
> +struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
> +
> +static const char *MZ_RTE_DMADEV_DATA = "rte_dmadev_data";


You may change MZ_RTE_DMADEV_DATA to mz_rte_dmadev_data


> +/* Shared memory between primary and secondary processes. */
> +static struct {
> +       struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
> +} *dmadev_shared_data;
> +
> +RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
> +#define RTE_DMADEV_LOG(level, ...) \
> +       rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
> +
> +/* Macros to check for valid device id */
> +#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
> +       if (!rte_dmadev_is_valid_dev(dev_id)) { \
> +               RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> +               return retval; \
> +       } \
> +} while (0)
> +
> +static int
> +dmadev_check_name(const char *name)
> +{
> +       size_t name_len;
> +
> +       if (name == NULL) {
> +               RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
> +               return -EINVAL;
> +       }
> +
> +       name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
> +       if (name_len == 0) {
> +               RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
> +               return -EINVAL;
> +       }
> +       if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
> +               RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
> +               return -EINVAL;
> +       }
> +
> +       return 0;
> +}
> +
> +static uint16_t
> +dmadev_find_free_dev(void)
> +{
> +       uint16_t i;
> +
> +       for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +               if (dmadev_shared_data->data[i].dev_name[0] == '\0') {

Please check: is '\0' written to dev_name when the device is freed, or not?

> +                       RTE_ASSERT(rte_dmadevices[i].state ==
> +                                  RTE_DMADEV_UNUSED);

Please remove RTE_ASSERT from the library.


> +                       return i;
> +               }
> +       }
> +
> +       return RTE_DMADEV_MAX_DEVS;
> +}
> +
> +static struct rte_dmadev*
> +dmadev_find(const char *name)
> +{
> +       uint16_t i;
> +
> +       for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +               if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
> +                   (!strcmp(name, rte_dmadevices[i].data->dev_name)))
> +                       return &rte_dmadevices[i];
> +       }
> +
> +       return NULL;
> +}
> +
> +static int
> +dmadev_shared_data_prepare(void)
> +{
> +       const struct rte_memzone *mz;
> +
> +       if (dmadev_shared_data == NULL) {
> +               if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +                       /* Allocate port data and ownership shared memory. */
> +                       mz = rte_memzone_reserve(MZ_RTE_DMADEV_DATA,
> +                                        sizeof(*dmadev_shared_data),
> +                                        rte_socket_id(), 0);
> +               } else
> +                       mz = rte_memzone_lookup(MZ_RTE_DMADEV_DATA);
> +               if (mz == NULL)
> +                       return -ENOMEM;
> +
> +               dmadev_shared_data = mz->addr;
> +               if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +                       memset(dmadev_shared_data->data, 0,
> +                              sizeof(dmadev_shared_data->data));
> +       }
> +
> +       return 0;
> +}
> +
> +static struct rte_dmadev *
> +dmadev_allocate(const char *name)
> +{
> +       struct rte_dmadev *dev;
> +       uint16_t dev_id;
> +
> +       dev = dmadev_find(name);
> +       if (dev != NULL) {
> +               RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
> +               return NULL;
> +       }
> +
> +       if (dmadev_shared_data_prepare() != 0) {
> +               RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +               return NULL;
> +       }
> +
> +       dev_id = dmadev_find_free_dev();
> +       if (dev_id == RTE_DMADEV_MAX_DEVS) {
> +               RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
> +               return NULL;
> +       }
> +
> +       dev = &rte_dmadevices[dev_id];
> +       dev->data = &dmadev_shared_data->data[dev_id];
> +       dev->data->dev_id = dev_id;
> +       rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
> +
> +       return dev;
> +}
> +
> +static struct rte_dmadev *
> +dmadev_attach_secondary(const char *name)
> +{
> +       struct rte_dmadev *dev;
> +       uint16_t i;
> +
> +       if (dmadev_shared_data_prepare() != 0) {
> +               RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +               return NULL;
> +       }
> +
> +       for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +               if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
> +                       break;
> +       }
> +       if (i == RTE_DMADEV_MAX_DEVS) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %s is not driven by the primary process\n",
> +                       name);
> +               return NULL;
> +       }
> +
> +       dev = &rte_dmadevices[i];
> +       dev->data = &dmadev_shared_data->data[i];
> +       RTE_ASSERT(dev->data->dev_id == i);

Remove ASSERT from library.

> +       dev->dev_private = dev->data->dev_private;
> +
> +       return dev;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name)
> +{
> +       struct rte_dmadev *dev;
> +
> +       if (dmadev_check_name(name) != 0)
> +               return NULL;
> +
> +       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +               dev = dmadev_allocate(name);
> +       else
> +               dev = dmadev_attach_secondary(name);
> +
> +       if (dev == NULL)
> +               return NULL;
> +       dev->state = RTE_DMADEV_ATTACHED;
> +
> +       return dev;
> +}
> +
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev)
> +{
> +       void *dev_private_bak;


How about bak -> tmp

> +
> +       if (dev == NULL)
> +               return -EINVAL;
> +
> +       if (dev->state == RTE_DMADEV_UNUSED)
> +               return 0;
> +
> +       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +               memset(dev->data, 0, sizeof(struct rte_dmadev_data));
> +
> +       dev_private_bak = dev->dev_private;
> +       memset(dev, 0, sizeof(struct rte_dmadev));
> +       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +               dev->dev_private = dev_private_bak;
> +       dev->state = RTE_DMADEV_UNUSED;
> +
> +       return 0;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_get_device_by_name(const char *name)
> +{
> +       if (dmadev_check_name(name) != 0)
> +               return NULL;
> +       return dmadev_find(name);
> +}
> +
> +int
> +rte_dmadev_get_dev_id(const char *name)
> +{
> +       struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
> +       if (dev != NULL)
> +               return dev->data->dev_id;
> +       return -EINVAL;
> +}
> +
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id)
> +{
> +       return (dev_id < RTE_DMADEV_MAX_DEVS) &&
> +               rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
> +}
> +
> +uint16_t
> +rte_dmadev_count(void)
> +{
> +       uint16_t count = 0;
> +       uint16_t i;
> +
> +       for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +               if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
> +                       count++;
> +       }
> +
> +       return count;
> +}
> +
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
> +{
> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       int ret;
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       if (dev_info == NULL)
> +               return -EINVAL;
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
> +       memset(dev_info, 0, sizeof(struct rte_dmadev_info));
> +       ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
> +                                           sizeof(struct rte_dmadev_info));
> +       if (ret != 0)
> +               return ret;
> +
> +       dev_info->device = dev->device;
> +       dev_info->nb_vchans = dev->data->dev_conf.max_vchans;

This will only be updated after the configure stage; before configure it
reports zero.


> +
> +       return 0;
> +}
> +
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       struct rte_dmadev_info info;
> +       int ret;
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       if (dev_conf == NULL)
> +               return -EINVAL;
> +
> +       ret = rte_dmadev_info_get(dev_id, &info);
> +       if (ret != 0) {
> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (dev_conf->max_vchans == 0) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u configure zero vchans\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (dev_conf->max_vchans > info.max_vchans) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u configure too many vchans\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (dev_conf->enable_silent &&
> +           !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
> +               RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
> +               return -EINVAL;
> +       }
> +
> +       if (dev->data->dev_started != 0) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u must be stopped to allow configuration\n",
> +                       dev_id);
> +               return -EBUSY;
> +       }

ethdev and other device-class common code handle the reconfigure case, i.e.
the application first configures N vchans, then reconfigures down to N - M,
and the resources attached to the removed M vchans are freed.
Please do the same here.
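
As a rough sketch of what that could look like here (vchan_release is a
hypothetical per-vchan driver callback, not part of this patch):

	/* On reconfigure with fewer vchans, free the resources of the
	 * vchans beyond the new count before applying the new config.
	 */
	if (dev->dev_ops->vchan_release != NULL) {
		uint16_t vchan;
		for (vchan = dev_conf->max_vchans;
		     vchan < dev->data->dev_conf.max_vchans; vchan++)
			(*dev->dev_ops->vchan_release)(dev, vchan);
	}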



> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
> +       ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
> +       if (ret == 0)
> +               memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
> +
> +       return ret;
> +}
> +
> +int
> +rte_dmadev_start(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       int ret;
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       if (dev->data->dev_started != 0) {
> +               RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
> +               return 0;
> +       }
> +
> +       if (dev->dev_ops->dev_start == NULL)
> +               goto mark_started;
> +
> +       ret = (*dev->dev_ops->dev_start)(dev);
> +       if (ret != 0)
> +               return ret;
> +
> +mark_started:
> +       dev->data->dev_started = 1;
> +       return 0;
> +}
> +
> +int
> +rte_dmadev_stop(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       int ret;
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       if (dev->data->dev_started == 0) {
> +               RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
> +               return 0;
> +       }
> +
> +       if (dev->dev_ops->dev_stop == NULL)
> +               goto mark_stopped;
> +
> +       ret = (*dev->dev_ops->dev_stop)(dev);
> +       if (ret != 0)
> +               return ret;
> +
> +mark_stopped:
> +       dev->data->dev_started = 0;
> +       return 0;
> +}
> +
> +int
> +rte_dmadev_close(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +       /* Device must be stopped before it can be closed */
> +       if (dev->data->dev_started == 1) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u must be stopped before closing\n", dev_id);
> +               return -EBUSY;
> +       }
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
> +       return (*dev->dev_ops->dev_close)(dev);
> +}
> +
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +                      const struct rte_dmadev_vchan_conf *conf)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       struct rte_dmadev_info info;
> +       int ret;
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       if (conf == NULL)
> +               return -EINVAL;
> +
> +       dev = &rte_dmadevices[dev_id];
> +
> +       ret = rte_dmadev_info_get(dev_id, &info);
> +       if (ret != 0) {
> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
> +           conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
> +           conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
> +           conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
> +               RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
> +           !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u don't support mem2mem transfer\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
> +           !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u don't support mem2dev transfer\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
> +           !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u don't support dev2mem transfer\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
> +           !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u don't support dev2dev transfer\n", dev_id);
> +               return -EINVAL;
> +       }
> +       if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u number of descriptors invalid\n", dev_id);
> +               return -EINVAL;
> +       }

Missing sanity check on src and dst port in case the direction involves a
device.
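
Something along these lines would do, using only fields already defined in
this patch (treating RTE_DMADEV_PORT_NONE as "port not set" is my
assumption):

	if ((conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
	     conf->direction == RTE_DMA_DIR_DEV_TO_DEV) &&
	    conf->src_port.port_type == RTE_DMADEV_PORT_NONE) {
		RTE_DMADEV_LOG(ERR, "Device %u source port not set\n", dev_id);
		return -EINVAL;
	}
	if ((conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
	     conf->direction == RTE_DMA_DIR_DEV_TO_DEV) &&
	    conf->dst_port.port_type == RTE_DMADEV_PORT_NONE) {
		RTE_DMADEV_LOG(ERR, "Device %u dest port not set\n", dev_id);
		return -EINVAL;
	}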

> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
> +       return (*dev->dev_ops->vchan_setup)(dev, conf);
> +}
> +
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
> +                    struct rte_dmadev_stats *stats)
> +{
> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       if (stats == NULL)
> +               return -EINVAL;
> +       if (vchan >= dev->data->dev_conf.max_vchans &&
> +           vchan != RTE_DMADEV_ALL_VCHAN) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u vchan %u out of range\n", dev_id, vchan);
> +               return -EINVAL;
> +       }
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
> +       memset(stats, 0, sizeof(struct rte_dmadev_stats));
> +       return (*dev->dev_ops->stats_get)(dev, vchan, stats,
> +                                         sizeof(struct rte_dmadev_stats));
> +}
> +
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       if (vchan >= dev->data->dev_conf.max_vchans &&
> +           vchan != RTE_DMADEV_ALL_VCHAN) {
> +               RTE_DMADEV_LOG(ERR,
> +                       "Device %u vchan %u out of range\n", dev_id, vchan);
> +               return -EINVAL;
> +       }
> +
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
> +       return (*dev->dev_ops->stats_reset)(dev, vchan);
> +}
> +
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f)
> +{
> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       struct rte_dmadev_info info;
> +       int ret;
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       if (f == NULL)
> +               return -EINVAL;
> +
> +       ret = rte_dmadev_info_get(dev_id, &info);
> +       if (ret != 0) {
> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +               return -EINVAL;
> +       }
> +
> +       fprintf(f, "DMA Dev %u, '%s' [%s]\n",
> +               dev->data->dev_id,
> +               dev->data->dev_name,
> +               dev->data->dev_started ? "started" : "stopped");
> +       fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
> +       fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
> +       fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
> +       fprintf(f, "  silent_mode: %s\n",
> +               dev->data->dev_conf.enable_silent ? "on" : "off");

Iterating over each vchan and dumping at least its direction would probably
be useful.
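
A minimal sketch, assuming the library keeps a copy of each vchan's
configuration (it does not today, so vchan_conf[] in rte_dmadev_data is
hypothetical and would have to be recorded at vchan_setup time):

	uint16_t i;

	for (i = 0; i < info.nb_vchans; i++)
		fprintf(f, "  vchan %u: direction: %d\n", i,
			dev->data->vchan_conf[i].direction);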

> +
> +       if (dev->dev_ops->dev_dump != NULL)
> +               return (*dev->dev_ops->dev_dump)(dev, f);
> +
> +       return 0;
> +}
> +
> +int
> +rte_dmadev_selftest(uint16_t dev_id)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
> +       return (*dev->dev_ops->dev_selftest)(dev_id);
> +}
> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> new file mode 100644
> index 0000000..53d85f1
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -0,0 +1,1041 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + * Copyright(c) 2021 Marvell International Ltd.
> + * Copyright(c) 2021 SmartShare Systems.
> + */
> +
> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_
> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.
> + *
> + * The DMA framework is built on the following model:
> + *
> + *     ---------------   ---------------       ---------------
> + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> + *     | channel     |   | channel     |       | channel     |
> + *     ---------------   ---------------       ---------------
> + *            |                |                      |
> + *            ------------------                      |
> + *                     |                              |
> + *               ------------                    ------------
> + *               |  dmadev  |                    |  dmadev  |
> + *               ------------                    ------------
> + *                     |                              |
> + *            ------------------               ------------------
> + *            | HW-DMA-channel |               | HW-DMA-channel |
> + *            ------------------               ------------------
> + *                     |                              |
> + *                     --------------------------------
> + *                                     |
> + *                           ---------------------
> + *                           | HW-DMA-Controller |
> + *                           ---------------------
> + *
> + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
> + * each HW-DMA-channel should be represented by a dmadev.
> + *
> + * The dmadev can create multiple virtual DMA channels; each virtual DMA
> + * channel represents a different transfer context. A DMA operation request
> + * must be submitted to a virtual DMA channel. e.g. an application could create
> + * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
> + * virtual DMA channel 1 for the memory-to-device transfer scenario.
> + *
> + * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
> + * PCI/SoC device probing phase performed at EAL initialization time, and can
> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
> + * phase.
> + *
> + * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
> + * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> + *
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_vchan_setup()
> + *     - rte_dmadev_start()
> + *
> + * Then, the application can invoke dataplane APIs to process jobs.
> + *
> + * If the application wants to change the configuration (i.e. invoke
> + * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
> + * rte_dmadev_stop() first to stop the device and then do the reconfiguration
> + * before invoking rte_dmadev_start() again. The dataplane APIs should not be
> + * invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the
> + * rte_dmadev_close() function.
> + *
> + * The dataplane APIs include two parts:
> + * The first part is the submission of operation requests:
> + *     - rte_dmadev_copy()
> + *     - rte_dmadev_copy_sg()
> + *     - rte_dmadev_fill()
> + *     - rte_dmadev_submit()
> + *
> + * These APIs could work with different virtual DMA channels which have
> + * different contexts.
> + *
> + * The first three APIs are used to submit an operation request to the virtual
> + * DMA channel; if the submission is successful, a uint16_t ring_idx is
> + * returned, otherwise a negative number is returned.
> + *
> + * The last API is used to issue a doorbell to the hardware; alternatively,
> + * the RTE_DMA_OP_FLAG_SUBMIT flag (@see RTE_DMA_OP_FLAG_SUBMIT) on the first
> + * three APIs can do the same work.
> + *
> + * The second part is to obtain the result of requests:
> + *     - rte_dmadev_completed()
> + *         - return the number of operation requests completed successfully.
> + *     - rte_dmadev_completed_status()
> + *         - return the number of operation requests completed.
> + *
> + * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
> + * the application does not invoke the above two completed APIs.
> + *
> + * About the ring_idx which enqueue APIs (e.g. rte_dmadev_copy()
> + * rte_dmadev_fill()) returned, the rules are as follows:
> + *     - ring_idx for each virtual DMA channel are independent.
> + *     - For a virtual DMA channel, the ring_idx is monotonically incremented,
> + *       when it reaches UINT16_MAX, it wraps back to zero.
> + *     - This ring_idx can be used by applications to track per-operation
> + *       metadata in an application-defined circular ring.
> + *     - The initial ring_idx of a virtual DMA channel is zero, after the
> + *       device is stopped, the ring_idx needs to be reset to zero.
> + *
> + * One example:
> + *     - step-1: start one dmadev
> + *     - step-2: enqueue a copy operation, the ring_idx returned is 0
> + *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
> + *     - ...
> + *     - step-101: stop the dmadev
> + *     - step-102: start the dmadev
> + *     - step-103: enqueue a copy operation, the ring_idx returned is 0
> + *     - ...
> + *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
> + *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
> + *     - ...
> + *
> + * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
> + * rte_dmadev_copy_sg(), rte_dmadev_fill()) defined as rte_iova_t type. The
> + * dmadev supports two types of address: memory address and device address.
> + *
> + * - memory address: the source and destination address of the memory-to-memory
> + * transfer type, or the source address of the memory-to-device transfer type,
> + * or the destination address of the device-to-memory transfer type.
> + * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
> + * address can be any VA address, otherwise it must be an IOVA address.
> + *
> + * - device address: the source and destination address of the device-to-device
> + * transfer type, or the source address of the device-to-memory transfer type,
> + * or the destination address of the memory-to-device transfer type.
> + *
> + * By default, all the functions of the dmadev API exported by a PMD are
> + * lock-free functions which are assumed not to be invoked in parallel on
> + * different logical cores to work on the same target dmadev object.
> + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
> + * parallel invocation because these virtual DMA channels share the same
> + * HW-DMA-channel.
> + *
> + */
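
For readers of this thread, the documented setup order and dataplane flow
boil down to something like the following sketch (dev_id, addresses and
sizes are made up for illustration; error checks omitted):

	struct rte_dmadev_conf conf = { .max_vchans = 1 };
	struct rte_dmadev_vchan_conf vconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,
	};
	int vchan;

	rte_dmadev_configure(dev_id, &conf);
	vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
	rte_dmadev_start(dev_id);

	/* enqueue a copy and ring the doorbell in one call */
	rte_dmadev_copy(dev_id, vchan, src_iova, dst_iova, len,
			RTE_DMA_OP_FLAG_SUBMIT);
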
> +
> +#include <rte_common.h>
> +#include <rte_compat.h>
> +#include <rte_dev.h>
> +#include <rte_errno.h>
> +#include <rte_memory.h>
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +#define RTE_DMADEV_NAME_MAX_LEN        RTE_DEV_NAME_MAX_LEN
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the device identifier for the named DMA device.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   Returns DMA device identifier on success.
> + *   - <0: Failure to find named DMA device.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_get_dev_id(const char *name);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * @param dev_id
> + *   DMA device index.
> + *
> + * @return
> + *   - If the device index is valid (true) or not (false).
> + */
> +__rte_experimental
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the total number of DMA devices that have been successfully
> + * initialised.
> + *
> + * @return
> + *   The total number of usable DMA devices.
> + */
> +__rte_experimental
> +uint16_t
> +rte_dmadev_count(void);
> +
> +/* Enumerates DMA device capabilities. */
> +#define RTE_DMADEV_CAPA_MEM_TO_MEM     (1ull << 0)
> +/**< DMA device supports memory-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +#define RTE_DMADEV_CAPA_MEM_TO_DEV     (1ull << 1)
> +/**< DMA device supports memory-to-device transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +
> +#define RTE_DMADEV_CAPA_DEV_TO_MEM     (1ull << 2)
> +/**< DMA device supports device-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +
> +#define RTE_DMADEV_CAPA_DEV_TO_DEV     (1ull << 3)
> +/**< DMA device supports device-to-device transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +
> +#define RTE_DMADEV_CAPA_SVA            (1ull << 4)
> +/**< DMA device supports SVA, which allows using VA as the DMA address.
> + * If the device supports SVA, the application can pass any VA address, e.g.
> + * memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
> + * If the device does not support SVA, the application should pass an IOVA
> + * address obtained from rte_malloc() or rte_memzone().
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +#define RTE_DMADEV_CAPA_SILENT         (1ull << 5)
> +/**< DMA device supports working in silent mode.
> + * In this mode, the application is not required to invoke the
> + * rte_dmadev_completed*() APIs.
> + *
> + * @see struct rte_dmadev_conf::silent_mode
> + */
> +
> +#define RTE_DMADEV_CAPA_OPS_COPY       (1ull << 32)
> +/**< DMA device supports copy ops.
> + * This capability starts at bit index 32, leaving a gap between the
> + * normal capabilities and the ops capabilities.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +#define RTE_DMADEV_CAPA_OPS_COPY_SG    (1ull << 33)
> +/**< DMA device supports scatter-list copy ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
> +
> +#define RTE_DMADEV_CAPA_OPS_FILL       (1ull << 34)
> +/**< DMA device supports fill ops.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */
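
Since the ops capabilities share the dev_capa field with the other flags, an
application would gate its fast path on them like this (sketch only):

	struct rte_dmadev_info info;

	rte_dmadev_info_get(dev_id, &info);
	if (!(info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY_SG)) {
		/* no SG support: fall back to plain rte_dmadev_copy() calls */
	}
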
> +
> +/**
> + * A structure used to retrieve the information of a DMA device.
> + */
> +struct rte_dmadev_info {
> +       struct rte_device *device; /**< Generic Device information. */
> +       uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
> +       uint16_t max_vchans;
> +       /**< Maximum number of virtual DMA channels supported. */
> +       uint16_t max_desc;
> +       /**< Maximum allowed number of virtual DMA channel descriptors. */
> +       uint16_t min_desc;
> +       /**< Minimum allowed number of virtual DMA channel descriptors. */
> +       uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve information of a DMA device.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param[out] dev_info
> + *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
> + *   information of the device.
> + *
> + * @return
> + *   - =0: Success, driver updates the information of the DMA device.
> + *   - <0: Error code returned by the driver info get function.
> + *
> + */
> +__rte_experimental
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> +
> +/**
> + * A structure used to configure a DMA device.
> + */
> +struct rte_dmadev_conf {
> +       uint16_t max_vchans;
> +       /**< Maximum number of virtual DMA channels to use.
> +        * This value cannot be greater than the field 'max_vchans' of struct
> +        * rte_dmadev_info obtained from rte_dmadev_info_get().
> +        */
> +       bool enable_silent;
> +       /**< Indicates whether to enable silent mode.
> +        * false-default mode, true-silent mode.
> +        * This value can be set to true only when the SILENT capability is
> +        * supported.
> +        *
> +        * @see RTE_DMADEV_CAPA_SILENT
> +        */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Configure a DMA device.
> + *
> + * This function must be invoked first before any other function in the
> + * API. This function can also be re-invoked when a device is in the
> + * stopped state.
> + *
> + * @param dev_id
> + *   The identifier of the device to configure.
> + * @param dev_conf
> + *   The DMA device configuration structure encapsulated into rte_dmadev_conf
> + *   object.
> + *
> + * @return
> + *   - =0: Success, device configured.
> + *   - <0: Error code returned by the driver configuration function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Start a DMA device.
> + *
> + * The device start step is the last one and consists of setting the DMA
> + * to start accepting jobs.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device started.
> + *   - <0: Error code returned by the driver start function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_start(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Stop a DMA device.
> + *
> + * The device can be restarted with a call to rte_dmadev_start().
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - =0: Success, device stopped.
> + *   - <0: Error code returned by the driver stop function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stop(uint16_t dev_id);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Close a DMA device.
> + *
> + * The device cannot be restarted after this call.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *  - =0: Successfully close device
> + *  - <0: Failure to close device
> + */
> +__rte_experimental
> +int
> +rte_dmadev_close(uint16_t dev_id);
> +
> +/**
> + * rte_dma_direction - DMA transfer direction defines.
> + */
> +enum rte_dma_direction {
> +       RTE_DMA_DIR_MEM_TO_MEM,
> +       /**< DMA transfer direction - from memory to memory.
> +        *
> +        * @see struct rte_dmadev_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_MEM_TO_DEV,
> +       /**< DMA transfer direction - from memory to device.
> +        * In a typical scenario, SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoC works in
> +        * EP (endpoint) mode and can initiate a DMA move request from memory
> +        * (which is SoC memory) to device (which is host memory).
> +        *
> +        * @see struct rte_dmadev_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_DEV_TO_MEM,
> +       /**< DMA transfer direction - from device to memory.
> +        * In a typical scenario, SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoC works in
> +        * EP (endpoint) mode and can initiate a DMA move request from device
> +        * (which is host memory) to memory (which is SoC memory).
> +        *
> +        * @see struct rte_dmadev_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_DEV_TO_DEV,
> +       /**< DMA transfer direction - from device to device.
> +        * In a typical scenario, SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoC works in
> +        * EP (endpoint) mode and can initiate a DMA move request from device
> +        * (which is host memory) to the device (which is another host memory).
> +        *
> +        * @see struct rte_dmadev_vchan_conf::direction
> +        */
> +};
> +
> +/**
> + * enum rte_dmadev_port_type - DMA access port type defines.
> + *
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +enum rte_dmadev_port_type {
> +       RTE_DMADEV_PORT_NONE,
> +       RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
> +};
> +
> +/**
> + * A structure used to describe DMA access port parameters.
> + *
> + * @see struct rte_dmadev_vchan_conf::src_port
> + * @see struct rte_dmadev_vchan_conf::dst_port
> + */
> +struct rte_dmadev_port_param {
> +       enum rte_dmadev_port_type port_type;
> +       /**< The device access port type.
> +        * @see enum rte_dmadev_port_type
> +        */
> +       union {
> +               /** PCIe access port parameters.
> +                *
> +                * The following model shows SoC's PCIe module connects to
> +                * multiple PCIe hosts and multiple endpoints. The PCIe module
> +                * has an integrated DMA controller.
> +                *
> +                * If the DMA wants to access the memory of host A, it can be
> +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> +                *
> +                * \code{.unparsed}
> +                * System Bus
> +                *    |     ----------PCIe module----------
> +                *    |     Bus
> +                *    |     Interface
> +                *    |     -----        ------------------
> +                *    |     |   |        | PCIe Core0     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
> +                *    |     |   |--------|        |- VF-1 |--------| Root    |
> +                *    |     |   |        |   PF-1         |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |   |        | PCIe Core1     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
> +                *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
> +                *    |     |   |        |        |- VF-1 |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |DMA|        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |--------| PCIe Core2     |        ------
> +                *    |     |   |        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |        |                |        ------
> +                *    |     -----        ------------------
> +                *
> +                * \endcode
> +                *
> +                * @note If some fields cannot be supported by the
> +                * hardware/driver, then the driver ignores those fields.
> +                * Please check driver-specific documentation for limitations
> +                * and capabilities.
> +                */
> +               struct {
> +                       uint64_t coreid : 4; /**< PCIe core id used. */
> +                       uint64_t pfid : 8; /**< PF id used. */
> +                       uint64_t vfen : 1; /**< VF enable bit. */
> +                       uint64_t vfid : 16; /**< VF id used. */
> +                       uint64_t pasid : 20;
> +                       /**< The pasid field in the TLP packet. */
> +                       uint64_t attr : 3;
> +                       /**< The attributes field in the TLP packet. */
> +                       uint64_t ph : 2;
> +                       /**< The processing hint field in the TLP packet. */
> +                       uint64_t st : 16;
> +                       /**< The steering tag field in the TLP packet. */
> +               } pcie;
> +       };
> +       uint64_t reserved[2]; /**< Reserved for future fields. */
> +};
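
For what it's worth, filling this for a mem-to-dev vchan would look
something like the below (the id values are purely illustrative):

	struct rte_dmadev_port_param dst_port = {
		.port_type = RTE_DMADEV_PORT_PCIE,
		.pcie = { .coreid = 0, .pfid = 1, .vfen = 1, .vfid = 2 },
	};
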
> +
> +/**
> + * A structure used to configure a virtual DMA channel.
> + */
> +struct rte_dmadev_vchan_conf {
> +       enum rte_dma_direction direction;
> +       /**< Transfer direction
> +        * @see enum rte_dma_direction
> +        */
> +       uint16_t nb_desc;
> +       /**< Number of descriptors for the virtual DMA channel */
> +       struct rte_dmadev_port_param src_port;
> +       /**< 1) Used to describe the device access port parameter in the
> +        * device-to-memory transfer scenario.
> +        * 2) Used to describe the source device access port parameter in the
> +        * device-to-device transfer scenario.
> +        * @see struct rte_dmadev_port_param
> +        */
> +       struct rte_dmadev_port_param dst_port;
> +       /**< 1) Used to describe the device access port parameter in the
> +        * memory-to-device transfer scenario.
> +        * 2) Used to describe the destination device access port parameter in
> +        * the device-to-device transfer scenario.
> +        * @see struct rte_dmadev_port_param
> +        */
> +};
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Allocate and set up a virtual DMA channel.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param conf
> + *   The virtual DMA channel configuration structure encapsulated into
> + *   rte_dmadev_vchan_conf object.
> + *
> + * @return
> + *   - >=0: Allocation successful; the value is the virtual DMA channel id.
> + *          This value must be less than the field 'max_vchans' of struct
> + *          rte_dmadev_conf which was configured by rte_dmadev_configure().
> + *   - <0: Error code returned by the driver virtual channel setup function.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +                      const struct rte_dmadev_vchan_conf *conf);
> +
> +/**
> + * rte_dmadev_stats - running statistics.
> + */
> +struct rte_dmadev_stats {
> +       uint64_t submitted_count;
> +       /**< Count of operations which were submitted to hardware. */
> +       uint64_t completed_fail_count;
> +       /**< Count of operations which failed to complete. */
> +       uint64_t completed_count;
> +       /**< Count of operations which successfully completed. */
> +};
> +
> +#define RTE_DMADEV_ALL_VCHAN   0xFFFFu
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Retrieve basic statistics of one or all virtual DMA channel(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
> + * @param[out] stats
> + *   The basic statistics structure encapsulated into rte_dmadev_stats
> + *   object.
> + *
> + * @return
> + *   - =0: Successfully retrieve stats.
> + *   - <0: Failure to retrieve stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
> +                    struct rte_dmadev_stats *stats);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Reset basic statistics of one or all virtual DMA channel(s).
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
> + *
> + * @return
> + *   - =0: Successfully reset stats.
> + *   - <0: Failure to reset stats.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Dump DMA device info.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param f
> + *   The file to write the output to.
> + *
> + * @return
> + *   0 on success. Non-zero otherwise.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f);
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger the dmadev self test.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   - 0: Selftest successful.
> + *   - -ENOTSUP if the device doesn't support selftest
> + *   - other values < 0 on failure.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_selftest(uint16_t dev_id);
> +
> +/**
> + * rte_dma_status_code - DMA transfer result status code defines.
> + */
> +enum rte_dma_status_code {
> +       RTE_DMA_STATUS_SUCCESSFUL,
> +       /**< The operation completed successfully. */
> +       RTE_DMA_STATUS_USRER_ABORT,
> +       /**< The operation failed to complete due to an abort by the user.
> +        * This is mainly used when processing dev_stop: the user can modify the
> +        * descriptors (e.g. change one bit to tell hardware to abort this job),
> +        * which allows outstanding requests to complete as much as possible and
> +        * so reduces the time needed to stop the device.
> +        */
> +       RTE_DMA_STATUS_NOT_ATTEMPTED,
> +       /**< The operation failed to complete due to the following scenario:
> +        * the jobs in a particular batch are not attempted because they
> +        * appeared after a fence where a previous job failed. In some HW
> +        * implementations it is possible for jobs from later batches to be
> +        * completed, though, so the status of the not-attempted jobs is
> +        * reported before that of those newer completed jobs.
> +        */
> +       RTE_DMA_STATUS_INVALID_SRC_ADDR,
> +       /**< The operation failed to complete due to an invalid source
> +        * address.
> +        */
> +       RTE_DMA_STATUS_INVALID_DST_ADDR,
> +       /**< The operation failed to complete due to an invalid destination
> +        * address.
> +        */
> +       RTE_DMA_STATUS_INVALID_LENGTH,
> +       /**< The operation failed to complete due to an invalid length. */
> +       RTE_DMA_STATUS_INVALID_OPCODE,
> +       /**< The operation failed to complete due to an invalid opcode.
> +        * The DMA descriptor can have multiple formats, which are
> +        * distinguished by the opcode field.
> +        */
> +       RTE_DMA_STATUS_BUS_ERROR,
> +       /**< The operation failed to complete due to a bus error. */
> +       RTE_DMA_STATUS_DATA_POISION,
> +       /**< The operation failed to complete due to data poisoning. */
> +       RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
> +       /**< The operation failed to complete due to a descriptor read
> +        * error.
> +        */
> +       RTE_DMA_STATUS_DEV_LINK_ERROR,
> +       /**< The operation failed to complete due to a device link error.
> +        * Used to indicate a link error in the memory-to-device/
> +        * device-to-memory/device-to-device transfer scenario.
> +        */
> +       RTE_DMA_STATUS_UNKNOWN = 0x100,
> +       /**< The operation failed to complete due to an unknown reason.
> +        * The initial value is 256, which reserves space for future errors.
> +        */
> +};
> +
> +/**
> + * rte_dmadev_sge - holds a scatter DMA operation request entry.
> + */
> +struct rte_dmadev_sge {
> +       rte_iova_t addr; /**< The DMA operation address. */
> +       uint32_t length; /**< The DMA operation length. */
> +};
> +
> +#include "rte_dmadev_core.h"
> +
> +/* DMA flags to augment operation preparation. */
> +#define RTE_DMA_OP_FLAG_FENCE  (1ull << 0)
> +/**< DMA fence flag.
> + * It means the operation with this flag must be processed only after all
> + * previous operations are completed.
> + * If the specified DMA HW works in-order (i.e. it has an implicit fence
> + * between operations), this flag can be a NOP.
> + *
> + * @see rte_dmadev_copy()
> + * @see rte_dmadev_copy_sg()
> + * @see rte_dmadev_fill()
> + */
> +
> +#define RTE_DMA_OP_FLAG_SUBMIT (1ull << 1)
> +/**< DMA submit flag.
> + * It means the operation with this flag must issue a doorbell to the hardware
> + * after the jobs are enqueued.
> + */
> +
> +#define RTE_DMA_OP_FLAG_LLC    (1ull << 2)
> +/**< Hint to write DMA data to the low-level cache.
> + * Used for performance optimization; this is just a hint, and there is no
> + * capability bit for it, so the driver must not return an error if this flag
> + * is set.
> + */
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a copy operation onto the virtual DMA channel.
> + *
> + * This queues up a copy operation to be performed by hardware. If the 'flags'
> + * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
> + * begin this operation; otherwise, the doorbell is not triggered.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   The address of the source buffer.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the data to be copied.
> + * @param flags
> + *   The flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy job.
> + *   - <0: Error code returned by the driver copy function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> +               uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans || length == 0)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> +#endif
> +
> +       return (*dev->copy)(dev, vchan, src, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a scatter list copy operation onto the virtual DMA channel.
> + *
> + * This queues up a scatter list copy operation to be performed by hardware.
> + * If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
> + * triggered to begin this operation; otherwise, the doorbell is not triggered.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param src
> + *   Pointer to the array of source scatter entries.
> + * @param dst
> + *   Pointer to the array of destination scatter entries.
> + * @param nb_src
> + *   The number of source scatter entries.
> + * @param nb_dst
> + *   The number of destination scatter entries.
> + * @param flags
> + *   The flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
> + *   - <0: Error code returned by the driver copy scatterlist function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
> +                  struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
> +                  uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> +#endif
> +
> +       return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
> +}
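
A short usage sketch for the scatter list variant (the iova variables are
illustrative; both arrays describe the same total length):

	struct rte_dmadev_sge src[2] = {
		{ .addr = src_iova_a, .length = 64 },
		{ .addr = src_iova_b, .length = 64 },
	};
	struct rte_dmadev_sge dst[1] = {
		{ .addr = dst_iova, .length = 128 },
	};

	rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1,
			   RTE_DMA_OP_FLAG_SUBMIT);
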
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Enqueue a fill operation onto the virtual DMA channel.
> + *
> + * This queues up a fill operation to be performed by hardware. If the 'flags'
> + * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
> + * begin this operation; otherwise, the doorbell is not triggered.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param pattern
> + *   The pattern to populate the destination buffer with.
> + * @param dst
> + *   The address of the destination buffer.
> + * @param length
> + *   The length of the destination buffer.
> + * @param flags
> + *   The flags for this operation.
> + *   @see RTE_DMA_OP_FLAG_*
> + *
> + * @return
> + *   - 0..UINT16_MAX: index of enqueued fill job.
> + *   - <0: Error code returned by the driver fill function.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> +               rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans || length == 0)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +#endif
> +
> +       return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations.
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill().
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *
> + * @return
> + *   - =0: Successfully trigger hardware.
> + *   - <0: Failure to trigger hardware.
> + */
> +__rte_experimental
> +static inline int
> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans)
> +               return -EINVAL;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
> +#endif
> +
> +       return (*dev->submit)(dev, vchan);
> +}
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been successfully completed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   The maximum number of completed operations that can be processed.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] has_error
> + *   Indicates if there was a transfer error.
> + *   If not required, NULL can be passed in.
> + *
> + * @return
> + *   The number of operations that successfully completed. This return value
> + *   must be less than or equal to the value of nb_cpls.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> +                    uint16_t *last_idx, bool *has_error)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +       bool err;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans || nb_cpls == 0)
> +               return 0;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
> +#endif
> +
> +       /* Ensure the pointer values are non-null to simplify drivers.
> +        * In most cases these should be compile time evaluated, since this is
> +        * an inline function.
> +        * - If NULL is explicitly passed as parameter, then compiler knows the
> +        *   value is NULL
> +        * - If address of local variable is passed as parameter, then compiler
> +        *   can know it's non-NULL.
> +        */
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +       if (has_error == NULL)
> +               has_error = &err;
> +
> +       *has_error = false;
> +       return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> +}
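
The polling pattern this implies on the application side, for reference
(the burst size of 32 and the bookkeeping comment are illustrative):

	uint16_t n, last_idx;
	bool has_error;

	n = rte_dmadev_completed(dev_id, vchan, 32, &last_idx, &has_error);
	/* ... retire the n oldest in-flight ops, up to last_idx ... */
	if (has_error) {
		enum rte_dma_status_code status[32];
		/* drain the failed burst and inspect each status code */
		n = rte_dmadev_completed_status(dev_id, vchan, 32,
						&last_idx, status);
	}
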
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been completed; each completed
> + * operation may have succeeded or failed.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   Indicates the size of status array.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] status
> + *   The error code of operations that completed.
> + *   @see enum rte_dma_status_code
> + *
> + * @return
> + *   The number of operations that completed. This return value must be less
> + *   than or equal to the value of nb_cpls.
> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
> +                           const uint16_t nb_cpls, uint16_t *last_idx,
> +                           enum rte_dma_status_code *status)
> +{
> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +       uint16_t idx;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +       if (!rte_dmadev_is_valid_dev(dev_id) ||
> +           vchan >= dev->data->dev_conf.max_vchans ||
> +           nb_cpls == 0 || status == NULL)
> +               return 0;
> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
> +#endif
> +
> +       if (last_idx == NULL)
> +               last_idx = &idx;
> +
> +       return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
> +}
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_H_ */
> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> new file mode 100644
> index 0000000..0122f67
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -0,0 +1,182 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#ifndef _RTE_DMADEV_CORE_H_
> +#define _RTE_DMADEV_CORE_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device internal header.
> + *
> + * This header contains internal data types that are used by the DMA devices
> + * in order to expose their ops to the class.
> + *
> + * Applications should not use these APIs directly.
> + *
> + */
> +
> +struct rte_dmadev;
> +
> +typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
> +                                    struct rte_dmadev_info *dev_info,
> +                                    uint32_t info_sz);
> +/**< @internal Used to get device information of a device. */
> +
> +typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
> +                                     const struct rte_dmadev_conf *dev_conf);
> +/**< @internal Used to configure a device. */
> +
> +typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
> +/**< @internal Used to start a configured device. */
> +
> +typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
> +/**< @internal Used to stop a configured device. */
> +
> +typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
> +/**< @internal Used to close a configured device. */
> +
> +typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
> +                               const struct rte_dmadev_vchan_conf *conf);
> +/**< @internal Used to allocate and set up a virtual DMA channel. */
> +
> +typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
> +                       uint16_t vchan, struct rte_dmadev_stats *stats,
> +                       uint32_t stats_sz);
> +/**< @internal Used to retrieve basic statistics. */
> +
> +typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
> +/**< @internal Used to reset basic statistics. */
> +
> +typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
> +/**< @internal Used to dump internal information. */
> +
> +typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
> +/**< @internal Used to start dmadev selftest. */
> +
> +typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                                rte_iova_t src, rte_iova_t dst,
> +                                uint32_t length, uint64_t flags);
> +/**< @internal Used to enqueue a copy operation. */
> +
> +typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                                   const struct rte_dmadev_sge *src,
> +                                   const struct rte_dmadev_sge *dst,
> +                                   uint16_t nb_src, uint16_t nb_dst,
> +                                   uint64_t flags);
> +/**< @internal Used to enqueue a scatter list copy operation. */
> +
> +typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
> +                                uint64_t pattern, rte_iova_t dst,
> +                                uint32_t length, uint64_t flags);
> +/**< @internal Used to enqueue a fill operation. */
> +
> +typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
> +/**< @internal Used to trigger hardware to begin working. */
> +
> +typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
> +                               uint16_t vchan, const uint16_t nb_cpls,
> +                               uint16_t *last_idx, bool *has_error);
> +/**< @internal Used to return number of successfully completed operations. */
> +
> +typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
> +                       uint16_t vchan, const uint16_t nb_cpls,
> +                       uint16_t *last_idx, enum rte_dma_status_code *status);
> +/**< @internal Used to return number of completed operations. */
> +
> +/**
> + * Possible states of a DMA device.
> + */
> +enum rte_dmadev_state {
> +       RTE_DMADEV_UNUSED = 0,
> +       /**< Device is unused before being probed. */
> +       RTE_DMADEV_ATTACHED,
> +       /**< Device is attached when allocated in probing. */
> +};
> +
> +/**
> + * DMA device operations function pointer table
> + */
> +struct rte_dmadev_ops {
> +       rte_dmadev_info_get_t dev_info_get;
> +       rte_dmadev_configure_t dev_configure;
> +       rte_dmadev_start_t dev_start;
> +       rte_dmadev_stop_t dev_stop;
> +       rte_dmadev_close_t dev_close;
> +       rte_dmadev_vchan_setup_t vchan_setup;
> +       rte_dmadev_stats_get_t stats_get;
> +       rte_dmadev_stats_reset_t stats_reset;
> +       rte_dmadev_dump_t dev_dump;
> +       rte_dmadev_selftest_t dev_selftest;
> +};
> +
> +/**
> + * @internal
> + * The data part, with no function pointers, associated with each DMA device.
> + *
> + * This structure is safe to place in shared memory to be common among different
> + * processes in a multi-process configuration.
> + */
> +struct rte_dmadev_data {
> +       void *dev_private;
> +       /**< PMD-specific private data.
> +        * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
> +        * from the primary process; it is used by the secondary process to
> +        * retrieve the dev_private information.
> +        */
> +       uint16_t dev_id; /**< Device [external] identifier. */
> +       char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
> +       struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
> +       uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
> +       uint64_t reserved[2]; /**< Reserved for future fields */
> +} __rte_cache_aligned;
> +
> +/**
> + * @internal
> + * The generic data structure associated with each DMA device.
> + *
> + * The dataplane APIs are located at the beginning of the structure, along
> + * with the pointer to where all the data elements for the particular device
> + * are stored in shared memory. This split scheme allows the function pointer
> + * and driver data to be per-process, while the actual configuration data for
> + * the device is shared.
> + * The 'dev_private' field is placed in the first cache line to optimize
> + * performance, because the PMD mainly depends on this field in the datapath.
> + */
> +struct rte_dmadev {
> +       rte_dmadev_copy_t copy;
> +       rte_dmadev_copy_sg_t copy_sg;
> +       rte_dmadev_fill_t fill;
> +       rte_dmadev_submit_t submit;
> +       rte_dmadev_completed_t completed;
> +       rte_dmadev_completed_status_t completed_status;
> +       void *reserved_ptr; /**< Reserved for future IO function. */
> +       void *dev_private;
> +       /**< PMD-specific private data.
> +        *
> +        * - In the primary process, after the dmadev is allocated by
> +        * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
> +        * initialize this field and copy its value to the 'dev_private'
> +        * field of the 'struct rte_dmadev_data' pointed to by the 'data'
> +        * field.
> +        *
> +        * - In the secondary process, the dmadev framework initializes this
> +        * field by copying it from the 'dev_private' field of the
> +        * 'struct rte_dmadev_data' initialized by the primary process.
> +        *
> +        * @note It is the primary process's responsibility to deinitialize
> +        * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
> +        * device removal stage.
> +        */
> +       struct rte_dmadev_data *data; /**< Pointer to device data. */
> +       const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
> +       struct rte_device *device;
> +       /**< Device info supplied during device initialization. */
> +       enum rte_dmadev_state state; /**< Flag indicating the device state. */
> +       uint64_t reserved[2]; /**< Reserved for future fields. */
> +} __rte_cache_aligned;
> +
> +extern struct rte_dmadev rte_dmadevices[];
> +
> +#endif /* _RTE_DMADEV_CORE_H_ */
> diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
> new file mode 100644
> index 0000000..45141f9
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev_pmd.h
> @@ -0,0 +1,72 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + */
> +
> +#ifndef _RTE_DMADEV_PMD_H_
> +#define _RTE_DMADEV_PMD_H_
> +
> +/**
> + * @file
> + *
> + * RTE DMA Device PMD APIs
> + *
> + * Driver facing APIs for a DMA device. These are not to be called directly by
> + * any application.
> + */
> +
> +#include "rte_dmadev.h"
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +/**
> + * @internal
> + * Allocates a new dmadev slot for a DMA device and returns the pointer
> + * to that slot for the driver to use.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   A pointer to the DMA device slot in case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name);
> +
> +/**
> + * @internal
> + * Release the specified dmadev.
> + *
> + * @param dev
> + *   Device to be released.
> + *
> + * @return
> + *   - 0 on success, negative on error
> + */
> +__rte_internal
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev);
> +
> +/**
> + * @internal
> + * Return the DMA device based on the device name.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   A pointer to the DMA device slot in case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dmadev *
> +rte_dmadev_get_device_by_name(const char *name);
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +
> +#endif /* _RTE_DMADEV_PMD_H_ */
> diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
> new file mode 100644
> index 0000000..0f2ed4b
> --- /dev/null
> +++ b/lib/dmadev/version.map
> @@ -0,0 +1,37 @@
> +EXPERIMENTAL {
> +       global:
> +
> +       rte_dmadev_close;
> +       rte_dmadev_completed;
> +       rte_dmadev_completed_status;
> +       rte_dmadev_configure;
> +       rte_dmadev_copy;
> +       rte_dmadev_copy_sg;
> +       rte_dmadev_count;
> +       rte_dmadev_dump;
> +       rte_dmadev_fill;
> +       rte_dmadev_get_dev_id;
> +       rte_dmadev_info_get;
> +       rte_dmadev_is_valid_dev;
> +       rte_dmadev_selftest;
> +       rte_dmadev_start;
> +       rte_dmadev_stats_get;
> +       rte_dmadev_stats_reset;
> +       rte_dmadev_stop;
> +       rte_dmadev_submit;
> +       rte_dmadev_vchan_setup;
> +
> +       local: *;
> +};
> +
> +INTERNAL {
> +        global:
> +
> +       rte_dmadevices;
> +       rte_dmadev_get_device_by_name;
> +       rte_dmadev_pmd_allocate;
> +       rte_dmadev_pmd_release;
> +
> +       local: *;
> +};
> +
> diff --git a/lib/meson.build b/lib/meson.build
> index 1673ca4..68d239f 100644
> --- a/lib/meson.build
> +++ b/lib/meson.build
> @@ -60,6 +60,7 @@ libraries = [
>          'bpf',
>          'graph',
>          'node',
> +        'dmadev',
>  ]
>
>  if is_windows
> --
> 2.8.1
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v8] dmadev: introduce DMA device library
  2021-07-20  5:03   ` Jerin Jacob
@ 2021-07-20  6:53     ` fengchengwen
  2021-07-20  9:43       ` Jerin Jacob
  2021-07-20 10:13       ` Bruce Richardson
  0 siblings, 2 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-20  6:53 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

Thanks Jerin, comment inline

On 2021/7/20 13:03, Jerin Jacob wrote:
> On Tue, Jul 20, 2021 at 6:48 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>>
>> This patch introduce 'dmadevice' which is a generic type of DMA
>> device.
>>
>> The APIs of dmadev library exposes some generic operations which can
>> enable configuration and I/O with the DMA devices.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

[snip]

>> +int
>> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
>> +{
>> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +       int ret;
>> +
>> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
>> +       if (dev_info == NULL)
>> +               return -EINVAL;
>> +
>> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
>> +       memset(dev_info, 0, sizeof(struct rte_dmadev_info));
>> +       ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
>> +                                           sizeof(struct rte_dmadev_info));
>> +       if (ret != 0)
>> +               return ret;
>> +
>> +       dev_info->device = dev->device;
>> +       dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
> 
> This will be updated after configure stage.

Yes, dev_info->nb_vchans holds the number of virtual DMA channels configured.
Do you mean add a comment here?

> 
> 
>> +
>> +       return 0;
>> +}
>> +
>> +int
>> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
>> +{
>> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +       struct rte_dmadev_info info;
>> +       int ret;
>> +
>> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
>> +       if (dev_conf == NULL)
>> +               return -EINVAL;
>> +
>> +       ret = rte_dmadev_info_get(dev_id, &info);
>> +       if (ret != 0) {
>> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
>> +               return -EINVAL;
>> +       }
>> +       if (dev_conf->max_vchans == 0) {
>> +               RTE_DMADEV_LOG(ERR,
>> +                       "Device %u configure zero vchans\n", dev_id);
>> +               return -EINVAL;
>> +       }
>> +       if (dev_conf->max_vchans > info.max_vchans) {
>> +               RTE_DMADEV_LOG(ERR,
>> +                       "Device %u configure too many vchans\n", dev_id);
>> +               return -EINVAL;
>> +       }
>> +       if (dev_conf->enable_silent &&
>> +           !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
>> +               RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
>> +               return -EINVAL;
>> +       }
>> +
>> +       if (dev->data->dev_started != 0) {
>> +               RTE_DMADEV_LOG(ERR,
>> +                       "Device %u must be stopped to allow configuration\n",
>> +                       dev_id);
>> +               return -EBUSY;
>> +       }
> 
> ethdev and other device class common code handles the reconfigure case. i.e
> the application configures N vchan first and reconfigures to N - M
> then free the resources
> attached to M - N. Please do the same here.

DMA is a simple device, I think it's OK to reconfigure at driver-level.

PS: If we need to support reconfiguration at the dmadev level, dmadev should hold the vchan configuration,
and invoke the driver's vchan_release to release resources. This may introduce more complexity.


>> +int
>> +rte_dmadev_dump(uint16_t dev_id, FILE *f)
>> +{
>> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +       struct rte_dmadev_info info;
>> +       int ret;
>> +
>> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
>> +       if (f == NULL)
>> +               return -EINVAL;
>> +
>> +       ret = rte_dmadev_info_get(dev_id, &info);
>> +       if (ret != 0) {
>> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
>> +               return -EINVAL;
>> +       }
>> +
>> +       fprintf(f, "DMA Dev %u, '%s' [%s]\n",
>> +               dev->data->dev_id,
>> +               dev->data->dev_name,
>> +               dev->data->dev_started ? "started" : "stopped");
>> +       fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
>> +       fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
>> +       fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
>> +       fprintf(f, "  silent_mode: %s\n",
>> +               dev->data->dev_conf.enable_silent ? "on" : "off");
> 
> Probably iterate over each vchan and dumping the at least direction
> will be usefull.

dmadev doesn't hold the vchan configuration; needs more discussion.


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v8] dmadev: introduce DMA device library
  2021-07-20  6:53     ` fengchengwen
@ 2021-07-20  9:43       ` Jerin Jacob
  2021-07-20 10:13       ` Bruce Richardson
  1 sibling, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-20  9:43 UTC (permalink / raw)
  To: fengchengwen
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Tue, Jul 20, 2021 at 12:23 PM fengchengwen <fengchengwen@huawei.com> wrote:
>
> Thanks Jerin, comment inline
>
> On 2021/7/20 13:03, Jerin Jacob wrote:
> > On Tue, Jul 20, 2021 at 6:48 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
> >>
> >> This patch introduce 'dmadevice' which is a generic type of DMA
> >> device.
> >>
> >> The APIs of dmadev library exposes some generic operations which can
> >> enable configuration and I/O with the DMA devices.
> >>
> >> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>
> [snip]
>
> >> +int
> >> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
> >> +{
> >> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> >> +       int ret;
> >> +
> >> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> >> +       if (dev_info == NULL)
> >> +               return -EINVAL;
> >> +
> >> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
> >> +       memset(dev_info, 0, sizeof(struct rte_dmadev_info));
> >> +       ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
> >> +                                           sizeof(struct rte_dmadev_info));
> >> +       if (ret != 0)
> >> +               return ret;
> >> +
> >> +       dev_info->device = dev->device;
> >> +       dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
> >
> > This will be updated after configure stage.
>
> Yes, dev_info->nb_vchans holds the number of virtual DMA channels configured.
> Do you mean add a comment here?

If you are taking care of the case where rte_dmadev_info_get() is called
first and then configure(), then fine.


>
> >
> >
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +int
> >> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
> >> +{
> >> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> >> +       struct rte_dmadev_info info;
> >> +       int ret;
> >> +
> >> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> >> +       if (dev_conf == NULL)
> >> +               return -EINVAL;
> >> +
> >> +       ret = rte_dmadev_info_get(dev_id, &info);
> >> +       if (ret != 0) {
> >> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +       if (dev_conf->max_vchans == 0) {
> >> +               RTE_DMADEV_LOG(ERR,
> >> +                       "Device %u configure zero vchans\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +       if (dev_conf->max_vchans > info.max_vchans) {
> >> +               RTE_DMADEV_LOG(ERR,
> >> +                       "Device %u configure too many vchans\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +       if (dev_conf->enable_silent &&
> >> +           !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
> >> +               RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +
> >> +       if (dev->data->dev_started != 0) {
> >> +               RTE_DMADEV_LOG(ERR,
> >> +                       "Device %u must be stopped to allow configuration\n",
> >> +                       dev_id);
> >> +               return -EBUSY;
> >> +       }
> >
> > ethdev and other device class common code handles the reconfigure case. i.e
> > the application configures N vchan first and reconfigures to N - M
> > then free the resources
> > attached to M - N. Please do the same here.
>
> DMA is a simple device, I think it's OK to reconfigure at driver-level.

OK. If everyone thinks that way it is OK. No strong opinion.

>
> PS: If we need to support reconfiguration at the dmadev level, dmadev should hold the vchan configuration,
> and invoke the driver's vchan_release to release resources. This may introduce more complexity.
>
>
> >> +int
> >> +rte_dmadev_dump(uint16_t dev_id, FILE *f)
> >> +{
> >> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> >> +       struct rte_dmadev_info info;
> >> +       int ret;
> >> +
> >> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> >> +       if (f == NULL)
> >> +               return -EINVAL;
> >> +
> >> +       ret = rte_dmadev_info_get(dev_id, &info);
> >> +       if (ret != 0) {
> >> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +
> >> +       fprintf(f, "DMA Dev %u, '%s' [%s]\n",
> >> +               dev->data->dev_id,
> >> +               dev->data->dev_name,
> >> +               dev->data->dev_started ? "started" : "stopped");
> >> +       fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
> >> +       fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
> >> +       fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
> >> +       fprintf(f, "  silent_mode: %s\n",
> >> +               dev->data->dev_conf.enable_silent ? "on" : "off");
> >
> > Probably iterate over each vchan and dumping the at least direction
> > will be usefull.
>
> dmadev doesn't hold the vchan configuration; needs more discussion.

I'd prefer to have that. Leaving it to others.

>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v8] dmadev: introduce DMA device library
  2021-07-20  6:53     ` fengchengwen
  2021-07-20  9:43       ` Jerin Jacob
@ 2021-07-20 10:13       ` Bruce Richardson
  1 sibling, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-20 10:13 UTC (permalink / raw)
  To: fengchengwen
  Cc: Jerin Jacob, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Tue, Jul 20, 2021 at 02:53:08PM +0800, fengchengwen wrote:
> Thanks Jerin, comment inline
> 
> On 2021/7/20 13:03, Jerin Jacob wrote:
> > On Tue, Jul 20, 2021 at 6:48 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
> >>
> >> This patch introduce 'dmadevice' which is a generic type of DMA
> >> device.
> >>
> >> The APIs of dmadev library exposes some generic operations which can
> >> enable configuration and I/O with the DMA devices.
> >>
> >> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> 
> [snip]
> 
> >> +int
> >> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
> >> +{
> >> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> >> +       int ret;
> >> +
> >> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> >> +       if (dev_info == NULL)
> >> +               return -EINVAL;
> >> +
> >> +       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
> >> +       memset(dev_info, 0, sizeof(struct rte_dmadev_info));
> >> +       ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
> >> +                                           sizeof(struct rte_dmadev_info));
> >> +       if (ret != 0)
> >> +               return ret;
> >> +
> >> +       dev_info->device = dev->device;
> >> +       dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
> > 
> > This will be updated after configure stage.
> 
> Yes, dev_info->nb_vchans holds the number of virtual DMA channels configured.
> Do you mean add a comment here?
> 
> > 
> > 
> >> +
> >> +       return 0;
> >> +}
> >> +
> >> +int
> >> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
> >> +{
> >> +       struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> >> +       struct rte_dmadev_info info;
> >> +       int ret;
> >> +
> >> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> >> +       if (dev_conf == NULL)
> >> +               return -EINVAL;
> >> +
> >> +       ret = rte_dmadev_info_get(dev_id, &info);
> >> +       if (ret != 0) {
> >> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +       if (dev_conf->max_vchans == 0) {
> >> +               RTE_DMADEV_LOG(ERR,
> >> +                       "Device %u configure zero vchans\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +       if (dev_conf->max_vchans > info.max_vchans) {
> >> +               RTE_DMADEV_LOG(ERR,
> >> +                       "Device %u configure too many vchans\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +       if (dev_conf->enable_silent &&
> >> +           !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
> >> +               RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +
> >> +       if (dev->data->dev_started != 0) {
> >> +               RTE_DMADEV_LOG(ERR,
> >> +                       "Device %u must be stopped to allow configuration\n",
> >> +                       dev_id);
> >> +               return -EBUSY;
> >> +       }
> > 
> > ethdev and other device class common code handles the reconfigure case. i.e
> > the application configures N vchan first and reconfigures to N - M
> > then free the resources
> > attached to M - N. Please do the same here.
> 
> DMA is a simple device, I think it's OK to reconfigure at driver-level.
> 
> PS: If we need to support reconfiguration at the dmadev level, dmadev should hold the vchan configuration,
> and invoke the driver's vchan_release to release resources. This may introduce more complexity.
>
I would tend to agree to keep this as it is.

> 
> >> +int
> >> +rte_dmadev_dump(uint16_t dev_id, FILE *f)
> >> +{
> >> +       const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> >> +       struct rte_dmadev_info info;
> >> +       int ret;
> >> +
> >> +       RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> >> +       if (f == NULL)
> >> +               return -EINVAL;
> >> +
> >> +       ret = rte_dmadev_info_get(dev_id, &info);
> >> +       if (ret != 0) {
> >> +               RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> >> +               return -EINVAL;
> >> +       }
> >> +
> >> +       fprintf(f, "DMA Dev %u, '%s' [%s]\n",
> >> +               dev->data->dev_id,
> >> +               dev->data->dev_name,
> >> +               dev->data->dev_started ? "started" : "stopped");
> >> +       fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
> >> +       fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
> >> +       fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
> >> +       fprintf(f, "  silent_mode: %s\n",
> >> +               dev->data->dev_conf.enable_silent ? "on" : "off");
> > 
> > Probably iterate over each vchan and dumping the at least direction
> > will be usefull.
> 
> dmadev doesn't hold the vchan configuration; needs more discussion.
> 
I think this is fine as-is. The use of vchans doesn't apply to all devices
- unlike ethdev queues, for example - so I think having the driver manage
them is good.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v9] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (11 preceding siblings ...)
  2021-07-20  1:14 ` [dpdk-dev] [PATCH v8] " Chengwen Feng
@ 2021-07-20 11:12 ` Chengwen Feng
  2021-07-20 12:05   ` Bruce Richardson
  2021-07-20 12:46 ` [dpdk-dev] [PATCH v10] " Chengwen Feng
                   ` (16 subsequent siblings)
  29 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-07-20 11:12 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces 'dmadevice' which is a generic type of DMA
device.

The APIs of dmadev library exposes some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
v9:
* delete RTE_ASSERT invoke.
* make sure vchan setup fail when device started.
* add check src/dst port parameter when setup vchan.
* rename some variables in rte_dmadev.c.
v8:
* fix pcie access port diagram doxygen problem.
* fix typo.
* fix compile warning when enable DMADEV_DEBUG.
v7:
* add rte_dmadev_get_dev_id API.
* fix typo.
* use the default macro assignment scheme.
* rename RTE_DMA_DEV_CAPA_* to RTE_DMADEV_CAPA_*.
* rename rte_dmadev_conf.silent_mode to enable_silent.
* add memset when get stats.
v6:
* delete fence capability.
* delete vchan_release ops.
* copy_sg directly uses src/dst/nb_src/nb_dst as parameters.
* define rte_dma_direction, don't support multiple direction in the
  same vchan.
* fix segment fault when allocate.
* fix typo.
* fix comments format.
v5:
* add doxy-api-* file modify.
* use RTE_LOG_REGISTER_DEFAULT.
* fix typo.
* resolve some incorrect comments.
* fix some doxygen problems.
* fix version.map still hold rte_dmadev_completed_fails.
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete field 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all structs
  are prefixed with rte_dmadev.
* put the comment afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrange rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
---
 MAINTAINERS                  |    4 +
 config/rte_config.h          |    3 +
 doc/api/doxy-api-index.md    |    1 +
 doc/api/doxy-api.conf.in     |    1 +
 lib/dmadev/meson.build       |    7 +
 lib/dmadev/rte_dmadev.c      |  563 +++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 1041 ++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  182 ++++++++
 lib/dmadev/rte_dmadev_pmd.h  |   72 +++
 lib/dmadev/version.map       |   37 ++
 lib/meson.build              |    1 +
 11 files changed, 1912 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..109ec1f 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..b4f5498
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,563 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u doesn't support silent mode\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..53d85f1
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1041 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
+ * and each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, each of which
+ * represents a different transfer context. A DMA operation request must be
+ * submitted to a virtual DMA channel. E.g. an application could create
+ * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+ * virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively,
+ * the flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs
+ * can do the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not invoke the above two completed APIs.
+ *
+ * For the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero, after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
+ * The DMA operation addresses used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) are defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel from
+ * different logical cores on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the given dev_id is a valid DMA device index.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - true if the device index is valid, false otherwise.
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using VAs as DMA addresses.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc() or stack memory.
+ * If the device does not support SVA, the application must pass IOVA
+ * addresses, e.g. obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy operations.
+ * The operation capabilities start at bit index 32, leaving a gap between
+ * the normal capabilities and the operation capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-list copy operations.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill operations.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
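+ * An illustrative sketch of checking a capability after querying device info:
+ *
+ * \code{.c}
+ * struct rte_dmadev_info info;
+ *
+ * if (rte_dmadev_info_get(dev_id, &info) != 0)
+ *     return -1; // query failed
+ * if (!(info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY))
+ *     return -1; // device cannot perform plain copy operations
+ * \endcode
+ *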
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false: default mode; true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked before any other function in the API. It can
+ * also be re-invoked when a device is in the stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
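+ *
+ * An illustrative sketch, assuming the device supports at least one virtual
+ * DMA channel and that silent mode is not wanted:
+ *
+ * \code{.c}
+ * struct rte_dmadev_conf conf = {
+ *     .max_vchans = 1,
+ *     .enable_silent = false,
+ * };
+ *
+ * if (rte_dmadev_configure(dev_id, &conf) != 0)
+ *     return -1; // configuration failed
+ * \endcode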
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last step in device setup, and consists of
+ * setting the DMA device to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully closed the device.
+ *  - <0: Failed to close the device.
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to a device (the host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from a device
+	 * (the host's memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from a device
+	 * (one host's memory) to another device (another host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows a SoC's PCIe module connecting to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA engine wants to access the memory of host A, the
+		 * access can be initiated by PF-1 of Core0, or by VF-0 of
+		 * PF-0 of Core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
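+		 *
+		 * An illustrative sketch of addressing host A's memory via
+		 * PF-1 of PCIe Core0 in the model above (the field values
+		 * are hypothetical):
+		 *
+		 * \code{.c}
+		 * struct rte_dmadev_port_param port = {
+		 *     .port_type = RTE_DMADEV_PORT_PCIE,
+		 *     .pcie = {
+		 *         .coreid = 0, // PCIe Core0
+		 *         .pfid = 1,   // PF-1
+		 *         .vfen = 0,   // no VF used
+		 *     },
+		 * };
+		 * \endcode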
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The pasid field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation succeeded; the value is the virtual DMA channel id.
+ *          This value is less than the 'max_vchans' field of struct
+ *          rte_dmadev_conf configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
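+ *
+ * An illustrative sketch of setting up a memory-to-memory channel, assuming
+ * 'info' was previously filled in by rte_dmadev_info_get():
+ *
+ * \code{.c}
+ * struct rte_dmadev_vchan_conf vconf = {
+ *     .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *     .nb_desc = info.min_desc, // must lie in [min_desc, max_desc]
+ * };
+ *
+ * int vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
+ * if (vchan < 0)
+ *     return vchan; // setup failed
+ * \endcode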
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which successfully completed. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, all virtual DMA channels are covered.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieved stats.
+ *   - <0: Failed to retrieve stats.
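+ *
+ * An illustrative sketch of reading aggregate statistics for all channels:
+ *
+ * \code{.c}
+ * struct rte_dmadev_stats stats;
+ *
+ * if (rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats) == 0)
+ *     printf("submitted: %" PRIu64 " completed: %" PRIu64 "\n",
+ *            stats.submitted_count, stats.completed_count);
+ * \endcode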
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, all virtual DMA channels are covered.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failed to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop; the user can modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job), which allows outstanding requests to complete as much as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed, though, so the status of the not-attempted jobs is
+	 * reported before that of those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read
+	 * error.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device/
+	 * device-to-memory/device-to-device transfer scenarios.
+	 */
+	RTE_DMA_STATUS_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+#include "rte_dmadev_core.h"
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in order (i.e. it has an implicit fence
+ * between operations), this flag may be a NOP.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
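+ *
+ * An illustrative sketch: the second copy reads the output of the first, so
+ * it carries the fence flag ('src', 'mid', 'dst' and 'len' are hypothetical):
+ *
+ * \code{.c}
+ * rte_dmadev_copy(dev_id, vchan, src, mid, len, 0);
+ * rte_dmadev_copy(dev_id, vchan, mid, dst, len,
+ *                 RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode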
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must issue a doorbell to the hardware
+ * after the job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to DMA to write data to the low-level cache.
+ * Used for performance optimization; this is just a hint, there is no
+ * capability bit for it, and the driver should not return an error if this
+ * flag is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
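+ *
+ * An illustrative sketch of batching several copies and ringing the doorbell
+ * once ('srcs', 'dsts', 'lens' and 'nb_jobs' are hypothetical):
+ *
+ * \code{.c}
+ * for (uint16_t i = 0; i < nb_jobs; i++)
+ *     if (rte_dmadev_copy(dev_id, vchan, srcs[i], dsts[i], lens[i], 0) < 0)
+ *         break; // ring full or error
+ * rte_dmadev_submit(dev_id, vchan); // one doorbell for the whole batch
+ * \endcode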
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware.
+ * If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
+ * triggered to begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   Pointer to the source scatter entry array.
+ * @param dst
+ *   Pointer to the destination scatter entry array.
+ * @param nb_src
+ *   The number of source scatter entries.
+ * @param nb_dst
+ *   The number of destination scatter entries.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
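+ *
+ * An illustrative polling sketch ('BURST' is a hypothetical application
+ * batch size):
+ *
+ * \code{.c}
+ * uint16_t last_idx;
+ * bool has_error;
+ *
+ * uint16_t n = rte_dmadev_completed(dev_id, vchan, BURST,
+ *                                   &last_idx, &has_error);
+ * // process the n successful operations; if has_error is set, use
+ * // rte_dmadev_completed_status() to retrieve per-operation status codes.
+ * \endcode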
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; the result of
+ * each operation may be success or failure.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of the status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   The error code of operations that completed.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..0122f67
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by DMA devices in
+ * order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in 'struct rte_dmadev'
+	 * from the primary process; it is used by secondary processes to get
+	 * the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), PCI/SoC device probing should initialize
+	 * this field and copy its value to the 'dev_private' field of
+	 * 'struct rte_dmadev_data', which is pointed to by the 'data' field.
+	 *
+	 * - In a secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of
+	 * 'struct rte_dmadev_data', which was initialized by the primary
+	 * process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info which was supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..0f2ed4b
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,37 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
	global:
+
+	rte_dmadevices;
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
+
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v9] dmadev: introduce DMA device library
  2021-07-20 11:12 ` [dpdk-dev] [PATCH v9] " Chengwen Feng
@ 2021-07-20 12:05   ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-07-20 12:05 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Tue, Jul 20, 2021 at 07:12:52PM +0800, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>

Apologies for the late feedback. Just reporting issues as I find them while
working with the code.

/Bruce

> ---
<snip>
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Returns the number of operations that have been completed, and the
> + * operations result may succeed or fail.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + * @param nb_cpls
> + *   Indicates the size of status array.
> + * @param[out] last_idx
> + *   The last completed operation's index.
> + *   If not required, NULL can be passed in.
> + * @param[out] status
> + *   The error code of operations that completed.
> + *   @see enum rte_dma_status_code

The documentation should make it clear that this is an array with "nb_cpls"
entries.

> + *
> + * @return
> + *   The number of operations that completed. This return value must be less
> + *   than or equal to the value of nb_cpls.

also update this to report that the appropriate number of "status" entries
have been filled out.
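
i.e. a caller should be able to do something like the following (BURST and
handle_error() are just illustrative stand-ins):

	enum rte_dma_status_code status[BURST];
	uint16_t last_idx, i, n;

	n = rte_dmadev_completed_status(dev_id, vchan, BURST, &last_idx,
					status);
	for (i = 0; i < n; i++)
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
			handle_error(i, status[i]);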

> + */
> +__rte_experimental
> +static inline uint16_t
> +rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
> +			    const uint16_t nb_cpls, uint16_t *last_idx,
> +			    enum rte_dma_status_code *status)
<snip>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v10] dmadev: introduce DMA device library
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (12 preceding siblings ...)
  2021-07-20 11:12 ` [dpdk-dev] [PATCH v9] " Chengwen Feng
@ 2021-07-20 12:46 ` Chengwen Feng
  2021-07-26  6:53   ` fengchengwen
  2021-07-27  3:39 ` [dpdk-dev] [PATCH v11 0/2] support dmadev Chengwen Feng
                   ` (15 subsequent siblings)
  29 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-07-20 12:46 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces 'dmadevice' which is a generic type of DMA
device.

The APIs of the dmadev library expose some generic operations which can
enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
v10:
* fix rte_dmadev_completed_status comment.
v9:
* delete RTE_ASSERT invoke.
* make sure vchan setup fail when device started.
* add check src/dst port parameter when setup vchan.
* rename some variables in rte_dmadev.c.
v8:
* fix pcie access port diagram doxygen problem.
* fix typo.
* fix compile warning when enable DMADEV_DEBUG.
v7:
* add rte_dmadev_get_dev_id API.
* fix typo.
* use the default macro assignment scheme.
* rename RTE_DMA_DEV_CAPA_* to RTE_DMADEV_CAPA_*.
* rename rte_dmadev_conf.silent_mode to enable_silent.
* add memset when get stats.
v6:
* delete fence capability.
* delete vchan_release ops.
* copy_sg: directly use src/dst/nb_src/nb_dst as parameters.
* define rte_dma_direction, don't support multiple direction in the
  same vchan.
* fix segment fault when allocate.
* fix typo.
* fix comments format.
v5:
* add doxy-api-* file modify.
* use RTE_LOG_REGISTER_DEFAULT.
* fix typo.
* resolve some incorrect comments.
* fix some doxygen problems.
* fix version.map still holding rte_dmadev_completed_fails.
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete field 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPTED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all struct
  prefix with rte_dmadev.
* put the comment afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrangement rte_dmadev/rte_dmadev_data struct.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
---
 MAINTAINERS                  |    4 +
 config/rte_config.h          |    3 +
 doc/api/doxy-api-index.md    |    1 +
 doc/api/doxy-api.conf.in     |    1 +
 lib/dmadev/meson.build       |    7 +
 lib/dmadev/rte_dmadev.c      |  563 +++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 1044 ++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h |  182 ++++++++
 lib/dmadev/rte_dmadev_pmd.h  |   72 +++
 lib/dmadev/version.map       |   37 ++
 lib/meson.build              |    1 +
 11 files changed, 1915 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..109ec1f 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..b4f5498
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,563 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate shared memory for DMA device data. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..bf94179
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1044 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+ * DMA channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. e.g. an application could create
+ * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+ * virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and is
+ * released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
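+ * For illustration only, a minimal control-path sketch could look as below;
+ * dev_id, the descriptor count and the handle_error() helper are assumptions
+ * of this sketch, not part of the API:
+ *
+ * \code{.c}
+ * struct rte_dmadev_conf dev_conf = { .max_vchans = 1 };
+ * struct rte_dmadev_vchan_conf vchan_conf = {
+ *         .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *         .nb_desc = 1024,
+ * };
+ * int vchan;
+ *
+ * if (rte_dmadev_configure(dev_id, &dev_conf) < 0)
+ *         handle_error();
+ * vchan = rte_dmadev_vchan_setup(dev_id, &vchan_conf);
+ * if (vchan < 0)
+ *         handle_error();
+ * if (rte_dmadev_start(dev_id) < 0)
+ *         handle_error();
+ * // ... dataplane usage ...
+ * rte_dmadev_stop(dev_id);
+ * rte_dmadev_close(dev_id);
+ * \endcode
+ *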
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit the operation request to the virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively, the
+ * flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can do
+ * the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * application does not invoke the above two completed APIs.
+ *
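+ * For illustration only, a minimal data-path sketch could look as below;
+ * dev_id, vchan, src, dst, length and the handler helpers are assumptions of
+ * this sketch:
+ *
+ * \code{.c}
+ * uint16_t last_idx;
+ * bool has_error;
+ * int ring_idx;
+ *
+ * ring_idx = rte_dmadev_copy(dev_id, vchan, src, dst, length,
+ *                            RTE_DMA_OP_FLAG_SUBMIT);
+ * if (ring_idx < 0)
+ *         handle_enqueue_error();
+ *
+ * // busy-poll until the one outstanding copy reports completion
+ * while (rte_dmadev_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
+ *         ;
+ * if (has_error)
+ *         handle_transfer_error();
+ * \endcode
+ *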
+ * About the ring_idx which the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()) return, the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring; see the sketch
+ *       following the example below.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the returned ring_idx is 0
+ *     - step-3: enqueue a copy operation again, the returned ring_idx is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the returned ring_idx is 65535
+ *     - step-x+1: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
+ *
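+ * For illustration of the metadata tracking mentioned above, the sketch below
+ * assumes a power-of-two META_RING_SZ ring and a user-defined struct job_meta;
+ * neither is part of the API:
+ *
+ * \code{.c}
+ * #define META_RING_SZ 1024 // power of two, sized >= the vchan's nb_desc
+ * struct job_meta meta_ring[META_RING_SZ];
+ *
+ * int ring_idx = rte_dmadev_copy(dev_id, vchan, src, dst, length, 0);
+ * if (ring_idx >= 0)
+ *         meta_ring[ring_idx & (META_RING_SZ - 1)] = cur_meta;
+ * \endcode
+ *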
+ * The DMA operation addresses used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) are defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address; otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the dev_id is a valid DMA device index.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using a VA as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc or stack memory.
+ * If the device does not support SVA, the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::enable_silent
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * The ops capabilities start at bit index 32, leaving a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false: default mode, true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and could initiate a DMA move request from
+	 * memory (which is SoC memory) to device (which is host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and could initiate a DMA move request from
+	 * device (which is host memory) to memory (which is SoC memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and could initiate a DMA move request from
+	 * device (which is one host's memory) to device (which is another
+	 * host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows how the SoC's PCIe module connects
+		 * to multiple PCIe hosts and multiple endpoints. The PCIe
+		 * module has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, the access
+		 * can be initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields can not be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The pasid field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
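+
+/* For illustration only: a device-to-memory vchan over PCIe could be
+ * configured as below; all field values here are assumptions of this sketch,
+ * and dst_port is intentionally left as RTE_DMADEV_PORT_NONE (zeroed):
+ *
+ *	struct rte_dmadev_vchan_conf conf = {
+ *		.direction = RTE_DMA_DIR_DEV_TO_MEM,
+ *		.nb_desc = 1024,
+ *		.src_port = {
+ *			.port_type = RTE_DMADEV_PORT_PCIE,
+ *			.pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *		},
+ *	};
+ */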
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation succeeded; the value is the virtual DMA channel id.
+ *          This value is less than the 'max_vchans' field of struct
+ *          rte_dmadev_conf configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which completed successfully. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *   Use RTE_DMADEV_ALL_VCHAN for all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *   Use RTE_DMADEV_ALL_VCHAN for all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * this job); it allows outstanding requests to complete as much as
+	 * possible, so reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed, though, so report the status of the not-attempted jobs
+	 * before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device/
+	 * device-to-memory/device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+#include "rte_dmadev_core.h"
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * An operation with this flag must be processed only after all previous
+ * operations have completed.
+ * If the specified DMA HW works in-order (i.e. it has an implicit fence
+ * between operations), this flag could be a NOP.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * An operation with this flag must issue a doorbell to the hardware after
+ * the job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write data to the low level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so the driver should not return an error if this
+ * flag is set.
+ */
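+
+/* For illustration only: enqueues can be batched and the doorbell issued
+ * once at the end; i, n, src[], dst[] and len are assumptions of this sketch:
+ *
+ *	for (i = 0; i < n; i++)
+ *		rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len, 0);
+ *	rte_dmadev_submit(dev_id, vchan); // one doorbell for the whole batch
+ */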
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware.
+ * If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
+ * triggered to begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param src
+ *   Pointer to the array of source scatter entries.
+ * @param dst
+ *   Pointer to the array of destination scatter entries.
+ * @param nb_src
+ *   The number of source scatter entries.
+ * @param nb_dst
+ *   The number of destination scatter entries.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
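+ *
+ * For illustration only, an asymmetric 2:1 gather could look as below; the
+ * iova variables are assumptions of this sketch:
+ *
+ * \code{.c}
+ * struct rte_dmadev_sge src[2] = {
+ *         { .addr = src_iova0, .length = 512 },
+ *         { .addr = src_iova1, .length = 512 },
+ * };
+ * struct rte_dmadev_sge dst[1] = {
+ *         { .addr = dst_iova, .length = 1024 },
+ * };
+ * int ret = rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1,
+ *                              RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode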
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed, whether they
+ * succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (assuming n), then n values in the
+ *   status array are also set.
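+ *
+ * For illustration only, draining completions with per-job status could look
+ * as below; BURST and handle_failed_job() are assumptions of this sketch:
+ *
+ * \code{.c}
+ * enum rte_dma_status_code status[BURST];
+ * uint16_t last_idx, n, i;
+ *
+ * n = rte_dmadev_completed_status(dev_id, vchan, BURST, &last_idx, status);
+ * for (i = 0; i < n; i++) {
+ *         if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *                 // recover the i-th job's ring_idx with uint16_t wrap-around
+ *                 handle_failed_job((uint16_t)(last_idx - n + 1 + i),
+ *                                   status[i]);
+ * }
+ * \endcode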
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..0122f67
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these API directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from the primary process; it is used by a secondary process to get
+	 * dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field, and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data' pointed to by the 'data' field.
+	 *
+	 * - In a secondary process, the dmadev framework initializes this
+	 * field by copying from the 'dev_private' field of 'struct
+	 * rte_dmadev_data' which was initialized by the primary process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..0f2ed4b
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,37 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dmadevices;
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
+
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v10] dmadev: introduce DMA device library
  2021-07-20 12:46 ` [dpdk-dev] [PATCH v10] " Chengwen Feng
@ 2021-07-26  6:53   ` fengchengwen
  2021-07-26  8:31     ` Bruce Richardson
  2021-07-26 11:03     ` Morten Brørup
  0 siblings, 2 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-26  6:53 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

Friendly ping.

On 2021/7/20 20:46, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
> v10:
> * fix rte_dmadev_completed_status comment.

[snip]

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v10] dmadev: introduce DMA device library
  2021-07-26  6:53   ` fengchengwen
@ 2021-07-26  8:31     ` Bruce Richardson
  2021-07-27  3:57       ` fengchengwen
  2021-07-26 11:03     ` Morten Brørup
  1 sibling, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-26  8:31 UTC (permalink / raw)
  To: fengchengwen
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Mon, Jul 26, 2021 at 02:53:16PM +0800, fengchengwen wrote:
> Friendly ping.
> 
> On 2021/7/20 20:46, Chengwen Feng wrote:
> > This patch introduce 'dmadevice' which is a generic type of DMA
> > device.
> > 
> > The APIs of dmadev library exposes some generic operations which can
> > enable configuration and I/O with the DMA devices.
> > 
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > ---
> > v10:
> > * fix rte_dmadev_completed_status comment.
> 
> [snip]

I'm still working through porting over our driver(s) to the latest revisions,
and digging into the details of the error handling and approaches, looking
for any issues. For now, a couple of small suggestions and ideas:

* for the STATUS_UNKNOWN value, I think it should be renamed to
  ERROR_UNKNOWN since this is an error value. Alternatively, add a new
  "UNKNOWN_ERROR" entry in the list to cover this possibility i.e. we know
  status is an error, just not exactly what the error is.

* While we have errors for both invalid source or invalid destination
  addresses, I think we also should add a slightly more general error code
  for "INVALID_ADDR" to cover the case where one is bad but we are not sure
  which.

* Not sure exactly how to handle this, but I suspect we may need some sort
  of capability flag to cover behaviour on hitting an error. For HW using
  our original ioat driver, the jobs are done strictly in order and the HW
  will halt on error, while with hardware using newer idxd driver, things are
  done potentially out-of-order and other jobs continue after the failed
  job. I will come back with a more concrete proposal on this later, once I
  get both drivers up and working. I suspect we will encounter more
  edge-cases like this as people work on drivers.

Overall, functionally this patchset looks pretty good to me. One thing
that will be needed for merge into mainline is a chapter on dmadev for the
programmers guide document, plus any other necessary doc updates such as a
good release-note update for this new lib.

For what is here now though,

Acked-by: Bruce Richardson <bruce.richardson@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v10] dmadev: introduce DMA device library
  2021-07-26  6:53   ` fengchengwen
  2021-07-26  8:31     ` Bruce Richardson
@ 2021-07-26 11:03     ` Morten Brørup
  2021-07-26 11:21       ` Jerin Jacob
  1 sibling, 1 reply; 339+ messages in thread
From: Morten Brørup @ 2021-07-26 11:03 UTC (permalink / raw)
  To: fengchengwen
  Cc: dev, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, thomas, ferruh.yigit, bruce.richardson,
	jerinj, jerinjacobk, andrew.rybchenko

> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of fengchengwen
> 
> Friendly ping.
> 
> On 2021/7/20 20:46, Chengwen Feng wrote:
> > This patch introduce 'dmadevice' which is a generic type of DMA
> > device.
> >
> > The APIs of dmadev library exposes some generic operations which can
> > enable configuration and I/O with the DMA devices.
> >
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > ---
> > v10:
> > * fix rte_dmadev_completed_status comment.
> 
> [snip]

Back from vacation. It generally looks good, so you may add:

Acked-By: Morten Brørup <mb@smartsharesystems.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v10] dmadev: introduce DMA device library
  2021-07-26 11:03     ` Morten Brørup
@ 2021-07-26 11:21       ` Jerin Jacob
  0 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-07-26 11:21 UTC (permalink / raw)
  To: Morten Brørup
  Cc: fengchengwen, dpdk-dev, Nipun Gupta, Hemant Agrawal,
	Maxime Coquelin, Honnappa Nagarahalli, David Marchand,
	Satananda Burla, Prasun Kapoor, Ananyev, Konstantin,
	Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko

On Mon, Jul 26, 2021 at 4:34 PM Morten Brørup <mb@smartsharesystems.com> wrote:
>
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of fengchengwen
> >
> > Friendly ping.
> >
> > On 2021/7/20 20:46, Chengwen Feng wrote:
> > > This patch introduce 'dmadevice' which is a generic type of DMA
> > > device.
> > >
> > > The APIs of dmadev library exposes some generic operations which can
> > > enable configuration and I/O with the DMA devices.
> > >
> > > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > > ---
> > > v10:
> > > * fix rte_dmadev_completed_status comment.
> >
> > [snip]
>
> Back from vacation. It generally looks good, so you may add:
>
> Acked-By: Morten Brørup <mb@smartsharesystems.com>


You can add my Acked-by after splitting the patches.

>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v11 0/2] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (13 preceding siblings ...)
  2021-07-20 12:46 ` [dpdk-dev] [PATCH v10] " Chengwen Feng
@ 2021-07-27  3:39 ` Chengwen Feng
  2021-07-27  3:39   ` [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library Chengwen Feng
  2021-07-27  3:40   ` [dpdk-dev] [PATCH v11 2/2] doc: add dmadev library guide Chengwen Feng
  2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
                   ` (14 subsequent siblings)
  29 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-27  3:39 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch set contains two patches:
1) dmadev library, including header files and implementations.
2) dmadev programming guide.

Chengwen Feng (2):
  dmadev: introduce DMA device library
  doc: add dmadev library guide

---
v11:
* rename RTE_DMA_STATUS_UNKNOWN to RTE_DMA_STATUS_ERROR_UNKNOWN.
* add RTE_DMA_STATUS_INVALID_ADDR macro.
* update release-note.
* add acked-by for 1/2 patch.
* add dmadev programming guide which is 2/2 patch.
v10:
* fix rte_dmadev_completed_status comment.
v9:
* delete RTE_ASSERT invocations.
* make sure vchan setup fails when the device is started.
* add checks of src/dst port parameters when setting up a vchan.
* rename some variables in rte_dmadev.c.
v8:
* fix pcie access port diagram doxygen problem.
* fix typo.
* fix compile warning when enable DMADEV_DEBUG.
v7:
* add rte_dmadev_get_dev_id API.
* fix typo.
* use the default macro assignment scheme.
* rename RTE_DMA_DEV_CAPA_* to RTE_DMADEV_CAPA_*.
* rename rte_dmadev_conf.silent_mode to enable_silent.
* add memset when get stats.
v6:
* delete fence capability.
* delete vchan_release ops.
* copy_sg directly uses src/dst/nb_src/nb_dst as parameters.
* define rte_dma_direction; don't support multiple directions in the
  same vchan.
* fix segment fault when allocating.
* fix typo.
* fix comments format.
v5:
* add doxy-api-* file modify.
* use RTE_LOG_REGISTER_DEFAULT.
* fix typo.
* resolve some incorrect comments.
* fix some doxygen problems.
* fix version.map still hold rte_dmadev_completed_fails.
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete field 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPTED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all structs are
  prefixed with rte_dmadev.
* put the comment afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrange the rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.
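
Referenced from the v4 notes above, the following sketch shows how an
application might drain per-operation status codes with
rte_dmadev_completed_status(). It is illustrative only: drain_completions()
and BURST are hypothetical names, and dev_id/vchan are assumed to name a
started device and a configured virtual channel.

#define BURST 32

static uint64_t
drain_completions(uint16_t dev_id, uint16_t vchan)
{
	enum rte_dma_status_code status[BURST];
	uint16_t last_idx, n, i;
	uint64_t failures = 0;

	/* One status code is written per completed operation. */
	n = rte_dmadev_completed_status(dev_id, vchan, BURST,
					&last_idx, status);
	for (i = 0; i < n; i++) {
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
			failures++;
	}

	return failures;
}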

 MAINTAINERS                            |    4 +
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/prog_guide/dmadev.rst       |  123 ++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_08.rst |    6 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  563 +++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1049 ++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  182 ++++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 +++
 lib/dmadev/version.map                 |   37 ++
 lib/meson.build                        |    1 +
 14 files changed, 2050 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library
  2021-07-27  3:39 ` [dpdk-dev] [PATCH v11 0/2] support dmadev Chengwen Feng
@ 2021-07-27  3:39   ` Chengwen Feng
  2021-07-28 11:13     ` Bruce Richardson
  2021-07-27  3:40   ` [dpdk-dev] [PATCH v11 2/2] doc: add dmadev library guide Chengwen Feng
  1 sibling, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-07-27  3:39 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces 'dmadevice', which is a generic type of DMA
device.

The APIs of the dmadev library expose generic operations which can
enable configuration and I/O with the DMA devices.
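
As an illustration of the virtual channel setup path added by this patch,
the fragment below configures a hypothetical device-to-memory channel. It
is a sketch only: dev_id is assumed to name a configured but stopped
device, 256 descriptors are assumed to be within its descriptor limits,
and the PCIe coreid/pfid values are placeholders an application would
derive from its own topology.

	/* Dev-to-mem requires a source port; dst_port stays PORT_NONE. */
	struct rte_dmadev_vchan_conf vconf = {
		.direction = RTE_DMA_DIR_DEV_TO_MEM,
		.nb_desc = 256,
		.src_port = {
			.port_type = RTE_DMADEV_PORT_PCIE,
			.pcie = { .coreid = 0, .pfid = 1 },
		},
	};
	int vchan = rte_dmadev_vchan_setup(dev_id, &vconf);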

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 MAINTAINERS                            |    4 +
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/rel_notes/release_21_08.rst |    6 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  563 +++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1049 ++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  182 ++++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 +++
 lib/dmadev/version.map                 |   37 ++
 lib/meson.build                        |    1 +
 12 files changed, 1926 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index af2a91d..e01a07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -495,6 +495,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..109ec1f 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index efcb0f3..5f32aba 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -122,6 +122,12 @@ New Features
   The experimental PMD power management API now supports managing
   multiple Ethernet Rx queues per lcore.
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provisioning of hardware and software DMA poll mode drivers, defining generic
+  APIs which support a number of different DMA operations.
+
 
 Removed Items
 -------------
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..d2fc85e
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..b4f5498
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,563 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate dmadev data shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configured with zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configured with too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u doesn't support silent mode\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..6b6a958
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,1049 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+ * DMA channel represents a different transfer context. The DMA operation
+ * request must be submitted to a virtual DMA channel. e.g. an application
+ * could create virtual DMA channel 0 for the memory-to-memory transfer
+ * scenario, and virtual DMA channel 1 for the memory-to-device transfer
+ * scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit the operation request to the virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; the flags
+ * parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can do the
+ * same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not need to invoke the above two completed APIs.
+ *
+ * About the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) is defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows VA to be used as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, such
+ * as memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
+ * If the device doesn't support SVA, the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::enable_silent
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, leaving a gap between the normal
+ * capability bits and the ops capability bits.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false: default mode; true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to device (the host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from device
+	 * (the host's memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from device
+	 * (one host's memory) to the device (another host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The pasid field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation successful; the value is the virtual DMA channel id.
+ *          This value must be less than the field 'max_vchans' of struct
+ *          rte_dmadev_conf configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which completed successfully. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, all channels are selected.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, all channels are selected.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user can modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * this job), which allows outstanding requests to complete as much as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it's possible for jobs from later batches to be
+	 * completed, though, so the status of the not attempted jobs is
+	 * reported before that of those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; covers the case where only the presence of an
+	 * address error is known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device/device-to-memory/
+	 * device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter-list DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+#include "rte_dmadev_core.h"
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in-order (meaning it has a default fence
+ * between operations), this flag could be a NOP.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must issue a doorbell to the hardware
+ * after the jobs are enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write DMA data to the low-level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so drivers should not return an error if this flag
+ * is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is rung to begin
+ * this operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter list copy operation to be performed by hardware.
+ * If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is
+ * rung to begin this operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer to the source scatter entry array.
+ * @param dst
+ *   The pointer to the destination scatter entry array.
+ * @param nb_src
+ *   The number of source scatter entries.
+ * @param nb_dst
+ *   The number of destination scatter entries.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatterlist job.
+ *   - <0: Error code returned by the driver copy scatterlist function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is rung to begin
+ * this operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed, whether the
+ * operations succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (assuming n), then n values in the
+ *   status array are also set.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..0122f67
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in 'struct rte_dmadev'
+	 * from the primary process; it is used by the secondary process to
+	 * obtain the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing code should
+	 * initialize this field, and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data', which is pointed to by the
+	 * 'data' field.
+	 *
+	 * - In the secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of 'struct
+	 * rte_dmadev_data', which was initialized by the primary process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info which was supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
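+/* Example (illustrative): how a PMD might wire up this structure at probe
+ * time; the "skeleton_" callbacks are hypothetical and assumed to be
+ * implemented elsewhere in the driver.
+ *
+ *	static const struct rte_dmadev_ops skeleton_ops = {
+ *		.dev_info_get  = skeleton_info_get,
+ *		.dev_configure = skeleton_configure,
+ *		.dev_start     = skeleton_start,
+ *		.dev_stop      = skeleton_stop,
+ *	};
+ *
+ *	dev->dev_ops = &skeleton_ops;
+ *	dev->copy = skeleton_copy;            // per-process fast-path pointer
+ *	dev->completed = skeleton_completed;  // per-process fast-path pointer
+ *	dev->data->dev_private = priv;        // shared, multi-process safe
+ */
+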
+extern struct rte_dmadev rte_dmadevices[];
+
+#endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
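+/* Example (illustrative): a hypothetical probe/remove flow built on the
+ * helpers above; error handling is abbreviated and all "skeleton_" names
+ * are invented.
+ *
+ *	static int skeleton_probe(const char *name)
+ *	{
+ *		struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ *
+ *		if (dev == NULL)
+ *			return -ENOMEM;
+ *		// initialize dev->dev_private, dev->dev_ops and the
+ *		// fast-path function pointers here
+ *		return 0;
+ *	}
+ *
+ *	static int skeleton_remove(const char *name)
+ *	{
+ *		struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+ *
+ *		if (dev == NULL)
+ *			return -ENODEV;
+ *		return rte_dmadev_pmd_release(dev);
+ *	}
+ */
+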
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..0f2ed4b
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,37 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
+
+INTERNAL {
	global:
+
+	rte_dmadevices;
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
+
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..68d239f 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -60,6 +60,7 @@ libraries = [
         'bpf',
         'graph',
         'node',
+        'dmadev',
 ]
 
 if is_windows
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v11 2/2] doc: add dmadev library guide
  2021-07-27  3:39 ` [dpdk-dev] [PATCH v11 0/2] support dmadev Chengwen Feng
  2021-07-27  3:39   ` [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library Chengwen Feng
@ 2021-07-27  3:40   ` Chengwen Feng
  2021-07-29 11:02     ` Jerin Jacob
  1 sibling, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-07-27  3:40 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/prog_guide/dmadev.rst | 123 +++++++++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst  |   1 +
 2 files changed, 124 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..5bad598
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,123 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices, as well as a generic DMA API which allows
+DMA devices to be managed and configured, and which supports provisioning DMA
+operations on the DMA poll mode drivers.
+
+The figure below outlines the model on which the DMA framework is built:
+
+.. code-block:: console
+
+    +-------------+   +-------------+       +-------------+
+    | virtual DMA |   | virtual DMA |       | virtual DMA |
+    | channel     |   | channel     |       | channel     |
+    +-------------+   +-------------+       +-------------+
+           |                 |                     |
+           -------------------                     |
+                    |                              |
+              +----------+                    +----------+
+              |  dmadev  |                    |  dmadev  |
+              +----------+                    +----------+
+                    |                              |
+            +--------------+                +--------------+
+            | hardware DMA |                | hardware DMA |
+            | channel      |                | channel      |
+            +--------------+                +--------------+
+                    |                              |
+                    --------------------------------
+                                    |
+                             +--------------+
+                             | hardware DMA |
+                             | controller   |
+                             +--------------+
+
+ * The DMA controller could have multiple hardware DMA channels (aka. hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+   DMA channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. For example, an application
+   could create virtual DMA channel 0 for the memory-to-memory transfer
+   scenario and virtual DMA channel 1 for the memory-to-device transfer
+   scenario.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI device
+identifier: each has a unique PCI BDF (bus/bridge, device, function). Specific
+physical DMA controllers, like other physical devices in DPDK, can be listed
+using the EAL command line options.
+
+The dmadevs are then dynamically allocated by rte_dmadev_pmd_allocate(), based
+on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The rte_dmadev_configure API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the maximum number of virtual DMA channels
+and an indication of whether to enable silent mode.
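+
+A minimal configuration sketch (values and error handling are illustrative):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = {
+       .max_vchans = 1,
+       .enable_silent = false,
+   };
+
+   if (rte_dmadev_configure(dev_id, &dev_conf) < 0)
+       handle_error();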
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rte_dmadev_vchan_setup API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, and the source and
+destination device access port parameters.
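+
+For example, setting up a virtual channel for memory-to-memory copies
+(illustrative values; see ``rte_dmadev.h`` for the structure definition):
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 1024,
+   };
+
+   if (rte_dmadev_vchan_setup(dev_id, &vchan_conf) < 0)
+       handle_error();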
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get``
+API can be used to retrieve the info of a device and the set of features it
+supports.
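+
+For example, checking for scatter-gather copy support (sketch):
+
+.. code-block:: c
+
+   struct rte_dmadev_info dev_info;
+
+   if (rte_dmadev_info_get(dev_id, &dev_info) == 0 &&
+       (dev_info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY_SG) != 0)
+       use_sg_copy = true; /* illustrative application flag */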
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v10] dmadev: introduce DMA device library
  2021-07-26  8:31     ` Bruce Richardson
@ 2021-07-27  3:57       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-27  3:57 UTC (permalink / raw)
  To: Bruce Richardson, jerinj, jerinjacobk, mb
  Cc: thomas, ferruh.yigit, andrew.rybchenko, dev, nipun.gupta,
	hemant.agrawal, maxime.coquelin, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev

Thanks

The first two points (STATUS_UNKNOWN -> STATUS_ERROR_UNKNOWN, INVALID_ADDR) are
already fixed in v11.

@Bruce @Jerin @Morten
In addition, a programming guide is added in v11; please take a look, thanks.


On 2021/7/26 16:31, Bruce Richardson wrote:
> On Mon, Jul 26, 2021 at 02:53:16PM +0800, fengchengwen wrote:
>> Friendly ping.
>>
>> On 2021/7/20 20:46, Chengwen Feng wrote:
>>> This patch introduce 'dmadevice' which is a generic type of DMA
>>> device.
>>>
>>> The APIs of dmadev library exposes some generic operations which can
>>> enable configuration and I/O with the DMA devices.
>>>
>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>>> ---
>>> v10:
>>> * fix rte_dmadev_completed_status comment.
>>
>> [snip]
> 
> I'm still working through porting over our driver(s) to the latest revisions,
> and digging into the details of the error handling and approaches, looking
> for any issues. For now, a couple of small suggestions and ideas:
> 
> * for the STATUS_UNKNOWN value, I think it should be renamed to
>   ERROR_UNKNOWN since this is an error value. Alternatively, add a new
>   "UNKNOWN_ERROR" entry in the list to cover this possibility i.e. we know
>   status is an error, just not exactly what the error is.
> 
> * While we have errors for both invalid source or invalid destination
>   addresses, I think we also should add a slightly more general error code
>   for "INVALID_ADDR" to cover the case where one is bad but we are not sure
>   which.
> 
> * Not sure exactly how to handle this, but I suspect we may need some sort
>   of capability flag to cover behaviour on hitting an error. For HW using
>   our original ioat driver, the jobs are done strictly in order and the HW
>   will halt on error, while with hardware using newer idxd driver, things are
>   done potentially out-of-order and other jobs continue after the failed
>   job. I will come back with a more concrete proposal on this later, once I
>   get both drivers up and working. I suspect we will encounter more
>   edge-cases like this as people work on drivers.
> 
> Overall, functionally this patchset looks pretty good to me. One thing
> that will be needed for merge into mainline is a chapter on dmadev for the
> programmers guide document, plus any other necessary doc updates such as a
> good release-note update for this new lib.
> 
> For what is here now though,
> 
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> 
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library
  2021-07-27  3:39   ` [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library Chengwen Feng
@ 2021-07-28 11:13     ` Bruce Richardson
  2021-07-29  1:26       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-28 11:13 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Tue, Jul 27, 2021 at 11:39:59AM +0800, Chengwen Feng wrote:
> This patch introduce 'dmadevice' which is a generic type of DMA
> device.
> 
> The APIs of dmadev library exposes some generic operations which can
> enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---

Thanks for this. Before it gets merged, I believe it needs to be split
further into multiple patches (say 4 or so) rather than adding the whole
lib in one go.

Normally, I believe the split would be something like:
* basic device structures and infrastructure e.g. alloc and release
  functions
* device config functions (and structures to go along with them)
  such as configure and queue_setup
* data plane functions

Documentation would be included in each of the patches, rather than done as
a block at the end.

Besides that, I have one small additional requests for the API. Based off
feedback for ioat driver, we added in the following function to that API,
and we probably need something similar in dmadev:

rte_ioat_burst_capacity()

For our implementation this returns the number of elements that can be
enqueued to the ring, at least for the current burst/batch of packets. We
did the API this way because there can be additional limits beyond ring
size on each individual burst beyond just the raw ring capacity, e.g. even
if there are 4k ring elements free, there may be limits on the max burst
size the hardware can do, or limits on the number of outstanding
batches etc.

Therefore can I request the addition of rte_dmadev_burst_capacity() [or
something similarly named] to the basic dmadev API set. For most hardware,
I think this will likely be the remaining free ring size, but I don't
believe the API should commit to that. The use case it was added for was to
enable an application which needs to do a multi-copy operation to check
that all copies can fit or not before enqueuing the first one. This is
important for hardware that doesn't have scatter-gather list support.
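
As an illustration, the intended usage pattern would be something like the
following (with rte_dmadev_burst_capacity() being the proposed, not yet
existing, API; all other names are illustrative):

	if (rte_dmadev_burst_capacity(dev_id, vchan) < nb_copies)
		return; /* not enough room for the full multi-copy job */
	for (i = 0; i < nb_copies; i++)
		rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len[i], 0);
	rte_dmadev_submit(dev_id, vchan);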

/Bruce

PS: One typo in code flagged below too.

<snip>
> + */
> +enum rte_dma_status_code {
> +	RTE_DMA_STATUS_SUCCESSFUL,
> +	/**< The operation completed successfully. */
> +	RTE_DMA_STATUS_USRER_ABORT,

              Typo here ^^^


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library
  2021-07-28 11:13     ` Bruce Richardson
@ 2021-07-29  1:26       ` fengchengwen
  2021-07-29  9:15         ` Bruce Richardson
  2021-07-29 10:44         ` Jerin Jacob
  0 siblings, 2 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-29  1:26 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

Thanks, inline comment

On 2021/7/28 19:13, Bruce Richardson wrote:
> On Tue, Jul 27, 2021 at 11:39:59AM +0800, Chengwen Feng wrote:
>> This patch introduce 'dmadevice' which is a generic type of DMA
>> device.
>>
>> The APIs of dmadev library exposes some generic operations which can
>> enable configuration and I/O with the DMA devices.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>> ---
> 
> Thanks for this. Before it gets merged, I believe it needs to be split
> further into multiple patches (say 4 or so) rather than adding the whole
> lib in one go.
> 
> Normally, I believe the split would be something like:
> * basic device structures and infrastructure e.g. alloc and release
>   functions
> * device config functions (and structures to go along with them)
>   such as configure and queue_setup
> * data plane functions
> 

I will try that.
Maybe one patch for the public file, one for the PMD header file, one for
the implementation, and the last one for the doc.

> Documentation would be included in each of the patches, rather than done as
> a block at the end.
> 
> Besides that, I have one small additional requests for the API. Based off
> feedback for ioat driver, we added in the following function to that API,
> and we probably need something similar in dmadev:
> 
> rte_ioat_burst_capacity()
> 
> For our implementation this returns the number of elements that can be
> enqueued to the ring, at least for the current burst/batch of packets. We
> did the API this way because there can be additional limits beyond ring
> size on each individual burst beyond just the raw ring capacity, e.g. even
> if there are 4k ring elements free, there may be limits on the max burst
> size the hardware can do, or limits on the number of outstanding
> batches etc.
> 
> Therefore can I request the addition of rte_dmadev_burst_capacity() [or
> something similarly named] to the basic dmadev API set. For most hardware,
> I think this will likely be the remaining free ring size, but I don't
> believe the API should commit to that. The use case it was added for was to
> enable an application which needs to do a multi-copy operation to check
> that all copies can fit or not before enqueuing the first one. This is
> important for hardware that doesn't have scatter-gather list support.

The remaining capacity can be inferred from the ring_idx values returned by
the enqueue and dequeue APIs.
So I don't think this API needs to be added.
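
For example (sketch; names are illustrative):

	uint16_t enq_idx;   /* last ring_idx returned by an enqueue API */
	uint16_t comp_idx;  /* last_idx returned by rte_dmadev_completed() */
	uint16_t in_flight = (uint16_t)(enq_idx - comp_idx); /* mod 65536 */
	uint16_t free_slots = nb_desc - in_flight;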

For the scatter-gather list, there may be a hardware limit on the max number
of src or dst entries; I prefer to add a 'max_sges' field in struct
rte_dmadev_info to indicate it.

> 
> /Bruce
> 
> PS: One typo in code flagged below too.
> 
> <snip>
>> + */
>> +enum rte_dma_status_code {
>> +	RTE_DMA_STATUS_SUCCESSFUL,
>> +	/**< The operation completed successfully. */
>> +	RTE_DMA_STATUS_USRER_ABORT,
> 
>               Typo here ^^^
> 

OK, USRER->USER will fix later

> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library
  2021-07-29  1:26       ` fengchengwen
@ 2021-07-29  9:15         ` Bruce Richardson
  2021-07-29 13:33           ` fengchengwen
  2021-07-29 10:44         ` Jerin Jacob
  1 sibling, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-07-29  9:15 UTC (permalink / raw)
  To: fengchengwen
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Thu, Jul 29, 2021 at 09:26:31AM +0800, fengchengwen wrote:
> Thanks, inline comment
> 
> On 2021/7/28 19:13, Bruce Richardson wrote:
> > On Tue, Jul 27, 2021 at 11:39:59AM +0800, Chengwen Feng wrote:
> >> This patch introduce 'dmadevice' which is a generic type of DMA
> >> device.
> >>
> >> The APIs of dmadev library exposes some generic operations which can
> >> enable configuration and I/O with the DMA devices.
> >>
> >> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> >> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> >> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> >> ---
> > 
> > Thanks for this. Before it gets merged, I believe it needs to be split
> > further into multiple patches (say 4 or so) rather than adding the whole
> > lib in one go.
> > 
> > Normally, I believe the split would be something like:
> > * basic device structures and infrastructure e.g. alloc and release
> >   functions
> > * device config functions (and structures to go along with them)
> >   such as configure and queue_setup
> > * data plane functions
> > 
> 
> I will try for it
> Maybe one patch for public file, one for pmd header file, one for
> implementation, and last for doc.
> 
> > Documentation would be included in each of the patches, rather than done as
> > a block at the end.
> > 
> > Besides that, I have one small additional requests for the API. Based off
> > feedback for ioat driver, we added in the following function to that API,
> > and we probably need something similar in dmadev:
> > 
> > rte_ioat_burst_capacity()
> > 
> > For our implementation this returns the number of elements that can be
> > enqueued to the ring, at least for the current burst/batch of packets. We
> > did the API this way because there can be additional limits beyond ring
> > size on each individual burst beyond just the raw ring capacity, e.g. even
> > if there are 4k ring elements free, there may be limits on the max burst
> > size the hardware can do, or limits on the number of outstanding
> > batches etc.
> > 
> > Therefore can I request the addition of rte_dmadev_burst_capacity() [or
> > something similarly named] to the basic dmadev API set. For most hardware,
> > I think this will likely be the remaining free ring size, but I don't
> > believe the API should commit to that. The use case it was added for was to
> > enable an application which needs to do a multi-copy operation to check
> > that all copies can fit or not before enqueuing the first one. This is
> > important for hardware that doesn't have scatter-gather list support.
> 
> Remaining capacity can be inferred by ring_idx which return from enqueue and
> dequeue APIs.
> So I don't think this API needs to be added.
> 

Yes, the app can always track it itself, but I still see value in adding
this API. However, so long as you are open to having it added later, it
doesn't matter if it's not present in the first versions of this API merged
in.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library
  2021-07-29  1:26       ` fengchengwen
  2021-07-29  9:15         ` Bruce Richardson
@ 2021-07-29 10:44         ` Jerin Jacob
  2021-07-29 13:30           ` fengchengwen
  1 sibling, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-29 10:44 UTC (permalink / raw)
  To: fengchengwen
  Cc: Bruce Richardson, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Thu, Jul 29, 2021 at 6:56 AM fengchengwen <fengchengwen@huawei.com> wrote:
>
> Thanks, inline comment
>
> On 2021/7/28 19:13, Bruce Richardson wrote:
> > On Tue, Jul 27, 2021 at 11:39:59AM +0800, Chengwen Feng wrote:
> >> This patch introduce 'dmadevice' which is a generic type of DMA
> >> device.
> >>
> >> The APIs of dmadev library exposes some generic operations which can
> >> enable configuration and I/O with the DMA devices.
> >>
> >> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> >> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> >> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> >> ---
> >
> > Thanks for this. Before it gets merged, I believe it needs to be split
> > further into multiple patches (say 4 or so) rather than adding the whole
> > lib in one go.
> >
> > Normally, I believe the split would be something like:
> > * basic device structures and infrastructure e.g. alloc and release
> >   functions
> > * device config functions (and structures to go along with them)
> >   such as configure and queue_setup
> > * data plane functions
> >
>
> I will try for it
> Maybe one patch for public file, one for pmd header file, one for
> implementation, and last for doc.

+1.

>
> > Documentation would be included in each of the patches, rather than done as
> > a block at the end.
> >
> > Besides that, I have one small additional requests for the API. Based off
> > feedback for ioat driver, we added in the following function to that API,
> > and we probably need something similar in dmadev:
> >
> > rte_ioat_burst_capacity()
> >
> > For our implementation this returns the number of elements that can be
> > enqueued to the ring, at least for the current burst/batch of packets. We
> > did the API this way because there can be additional limits beyond ring
> > size on each individual burst beyond just the raw ring capacity, e.g. even
> > if there are 4k ring elements free, there may be limits on the max burst
> > size the hardware can do, or limits on the number of outstanding
> > batches etc.
> >
> > Therefore can I request the addition of rte_dmadev_burst_capacity() [or
> > something similarly named] to the basic dmadev API set. For most hardware,
> > I think this will likely be the remaining free ring size, but I don't
> > believe the API should commit to that. The use case it was added for was to
> > enable an application which needs to do a multi-copy operation to check
> > that all copies can fit or not before enqueuing the first one. This is
> > important for hardware that doesn't have scatter-gather list support.

Yes. Could you add the following to enable scatter-gather support in
rte_dmadev_info:

        uint16_t sge_max;
        /**< Maximum allowed number of scatter-gather entries in a single sg call. */


>
> Remaining capacity can be inferred by ring_idx which return from enqueue and
> dequeue APIs.
> So I don't think this API needs to be added.
>
> For scatter-gather list, there maybe a hardware limit for max src or dst entry
> size, I prefer add 'max_sges' filed in struct rte_dmadev_info to indicate it.
>
> >
> > /Bruce
> >
> > PS: One typo in code flagged below too.
> >
> > <snip>
> >> + */
> >> +enum rte_dma_status_code {
> >> +    RTE_DMA_STATUS_SUCCESSFUL,
> >> +    /**< The operation completed successfully. */
> >> +    RTE_DMA_STATUS_USRER_ABORT,
> >
> >               Typo here ^^^
> >
>
> OK, USRER->USER will fix later
>
> > .
> >

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 2/2] doc: add dmadev library guide
  2021-07-27  3:40   ` [dpdk-dev] [PATCH v11 2/2] doc: add dmadev library guide Chengwen Feng
@ 2021-07-29 11:02     ` Jerin Jacob
  2021-07-29 13:13       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-07-29 11:02 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Tue, Jul 27, 2021 at 9:13 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch adds dmadev library guide.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
>  doc/guides/prog_guide/dmadev.rst | 123 +++++++++++++++++++++++++++++++++++++++
>  doc/guides/prog_guide/index.rst  |   1 +
>  2 files changed, 124 insertions(+)
>  create mode 100644 doc/guides/prog_guide/dmadev.rst
>
> diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
> new file mode 100644
> index 0000000..5bad598
> --- /dev/null
> +++ b/doc/guides/prog_guide/dmadev.rst
> @@ -0,0 +1,123 @@
> +.. SPDX-License-Identifier: BSD-3-Clause
> +   Copyright 2021 HiSilicon Limited
> +
> +DMA Device Library
> +====================
> +
> +The DMA library provides a DMA device framework for management and provisioning
> +of hardware and software DMA poll mode drivers, defining generic APIs which
> +support a number of different DMA operations.
> +
> +
> +Design Principles
> +-----------------
> +
> +The DMA library follows the same basic principles as those used in DPDK's
> +Ethernet Device framework and the RegEx framework. The DMA framework provides
> +a generic DMA device framework which supports both physical (hardware)
> +and virtual (software) DMA devices as well as a generic DMA API which allows
> +DMA devices to be managed and configured and supports DMA operations to be
> +provisioned on DMA poll mode driver.
> +
> +Figure below outlines the model of the DMA framework built on:
> +
> +.. code-block:: console
> +
> +    +-------------+   +-------------+       +-------------+
> +    | virtual DMA |   | virtual DMA |       | virtual DMA |
> +    | channel     |   | channel     |       | channel     |
> +    +-------------+   +-------------+       +-------------+
> +           |                 |                     |
> +           -------------------                     |
> +                    |                              |
> +              +----------+                    +----------+
> +              |  dmadev  |                    |  dmadev  |
> +              +----------+                    +----------+
> +                    |                              |
> +            +--------------+                +--------------+
> +            | hardware DMA |                | hardware DMA |
> +            | channel      |                | channel      |
> +            +--------------+                +--------------+
> +                    |                              |
> +                    --------------------------------
> +                                    |
> +                             +--------------+
> +                             | hardware DMA |
> +                             | controller   |
> +                             +--------------+

Please change to .svg file.
See grep -ri "Inkscape" doc/guides/contributing/documentation.rst
for guidelines.

> +
> + * The DMA controller could have multiple hardware DMA channels (aka. hardware
> +   DMA queues), each hardware DMA channel should be represented by a dmadev.
> + * The dmadev could create multiple virtual DMA channels, each virtual DMA
> +   channel represents a different transfer context. The DMA operation request
> +   must be submitted to the virtual DMA channel. e.g. Application could create
> +   virtual DMA channel 0 for memory-to-memory transfer scenario, and create
> +   virtual DMA channel 1 for memory-to-device transfer scenario.
> +
> +
> +Device Management
> +-----------------
> +
> +Device Creation
> +~~~~~~~~~~~~~~~
> +
> +Physical DMA controller is discovered during the PCI probe/enumeration of the
> +EAL function which is executed at DPDK initialization, based on their PCI
> +device identifier, each unique PCI BDF (bus/bridge, device, function). Specific
> +physical DMA controller, like other physical devices in DPDK can be listed using
> +the EAL command line options.
> +
> +And then dmadevs are dynamically allocated by rte_dmadev_pmd_allocate() based on
> +the number of hardware DMA channels.
> +
> +
> +Device Identification
> +~~~~~~~~~~~~~~~~~~~~~
> +
> +Each DMA device, whether physical or virtual is uniquely designated by two
> +identifiers:
> +
> +- A unique device index used to designate the DMA device in all functions
> +  exported by the DMA API.
> +
> +- A device name used to designate the DMA device in console messages, for
> +  administration or debugging purposes.
> +
> +
> +Device Configuration
> +~~~~~~~~~~~~~~~~~~~~
> +
> +The rte_dmadev_configure API is used to configure a DMA device.
> +
> +.. code-block:: c
> +
> +   int rte_dmadev_configure(uint16_t dev_id,
> +                            const struct rte_dmadev_conf *dev_conf);
> +
> +The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
> +for the DMA device for example maximum number of virtual DMA channels,
> +indication of whether to enable silent mode.
> +
> +
> +Configuration of Virtual DMA Channels
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +The rte_dmadev_vchan_setup API is used to configure a virtual DMA channel.
> +
> +.. code-block:: c
> +
> +   int rte_dmadev_vchan_setup(uint16_t dev_id,
> +                                     const struct rte_dmadev_vchan_conf *conf);
> +
> +The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
> +parameters for the virtual DMA channel for example transfer direction, number of
> +descriptor for the virtual DMA channel, source device access port parameter,
> +destination device access port parameter.


Looks good. Some other section really useful and it is specific to
DMADEV could be added

1) ring_idx management, You can copy the texts from API header file or so
2) rte_dmadev_completed() management.
3) Talk about silent mode too.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v12 0/6] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (14 preceding siblings ...)
  2021-07-27  3:39 ` [dpdk-dev] [PATCH v11 0/2] support dmadev Chengwen Feng
@ 2021-07-29 13:06 ` Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (5 more replies)
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
                   ` (13 subsequent siblings)
  29 siblings, 6 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-29 13:06 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch set contains six patches adding the new dmadev library.

Chengwen Feng (6):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  maintainers: add for dmadev

---
v12:
* add max_sges field for struct rte_dmadev_info.
* add more description to dmadev.rst.
* replace scatter with scatter gather in code comment.
* split to six patch.
* fix typo.
v11:
* rename RTE_DMA_STATUS_UNKNOWN to RTE_DMA_STATUS_ERROR_UNKNOWN.
* add RTE_DMA_STATUS_INVALID_ADDR marco.
* update release-note.
* add acked-by for 1/2 patch.
* add dmadev programming guide which is 2/2 patch.
v10:
* fix rte_dmadev_completed_status comment.
v9:
* delete RTE_ASSERT invoke.
* make sure vchan setup fail when device started.
* add check src/dst port parameter when setup vchan.
* rename some variables in rte_dmadev.c.
v8:
* fix pcie access port diagram doxygen problem.
* fix typo.
* fix compile warning when enable DMADEV_DEBUG.
v7:
* add rte_dmadev_get_dev_id API.
* fix typo.
* use the default macro assignment scheme.
* rename RTE_DMA_DEV_CAPA_* to RTE_DMADEV_CAPA_*.
* rename rte_dmadev_conf.silent_mode to enable_silent.
* add memset when get stats.
v6:
* delete fence capability.
* delete vchan_release ops.
* copy_sg directly uses src/dst/nb_src/nb_dst as parameters.
* define rte_dma_direction, don't support multiple direction in the
  same vchan.
* fix segment fault when allocate.
* fix typo.
* fix comments format.
v5:
* add doxy-api-* file modify.
* use RTE_LOG_REGISTER_DEFAULT.
* fix typo.
* resolve some incorrect comments.
* fix some doxygen problems.
* fix version.map still holding rte_dmadev_completed_fails.
v4:
* replace xxx_complete_fails with xxx_completed_status.
* add SILENT capability, also a silent_mode in rte_dmadev_conf.
* add op_flag_llc for performance.
* rename dmadev_xxx_t to rte_dmadev_xxx_t to avoid namespace conflict.
* delete filed 'enqueued_count' from rte_dmadev_stats.
* make rte_dmadev hold 'dev_private' field.
* add RTE_DMA_STATUS_NOT_ATTEMPED status code.
* rename RTE_DMA_STATUS_ACTIVE_DROP to RTE_DMA_STATUS_USER_ABORT.
* rename rte_dma_sg(e) to rte_dmadev_sg(e) to make sure all struct
  prefix with rte_dmadev.
* put the comment afterwards.
* fix some doxygen problems.
* delete macro RTE_DMADEV_VALID_DEV_ID_OR_RET and
  RTE_DMADEV_PTR_OR_ERR_RET.
* replace strlcpy with rte_strscpy.
* other minor modifications from review comment.
v3:
* rm reset and fill_sg ops.
* rm MT-safe capabilities.
* add submit flag.
* redefine rte_dma_sg to implement asymmetric copy.
* delete some reserved field for future use.
* rearrange rte_dmadev/rte_dmadev_data structs.
* refresh rte_dmadev.h copyright.
* update vchan setup parameter.
* modified some inappropriate descriptions.
* arrange version.map alphabetically.
* other minor modifications from review comment.

 MAINTAINERS                            |    5 +
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/prog_guide/dmadev.rst       |  147 +++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_08.rst |    6 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  563 +++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1059 ++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  182 ++++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 +++
 lib/dmadev/version.map                 |   36 ++
 lib/meson.build                        |    1 +
 14 files changed, 2084 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v12 1/6] dmadev: introduce DMA device library public APIs
  2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
@ 2021-07-29 13:06   ` Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-29 13:06 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs, which expose generic
operations that can enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
---
 doc/api/doxy-api-index.md |   1 +
 doc/api/doxy-api.conf.in  |   1 +
 lib/dmadev/meson.build    |   4 +
 lib/dmadev/rte_dmadev.h   | 963 ++++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map    |  25 ++
 lib/meson.build           |   1 +
 6 files changed, 995 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..a44a92b 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..6d5bd85
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..b6eb970
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,963 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+ * DMA channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. e.g. an application could create
+ * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+ * virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * The dmadevs are dynamically allocated by rte_dmadev_pmd_allocate() during
+ * the PCI/SoC device probing phase performed at EAL initialization time, and
+ * could be released by rte_dmadev_pmd_release() during the PCI/SoC device
+ * removal phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit the operation request to the virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively, the
+ * flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can do
+ * the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not need to invoke the above two completed APIs.
+ *
+ * About the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero, after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) is defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
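+
+/* Example (illustrative, not part of the API): using the ring_idx described
+ * above to track per-operation metadata in an application-defined ring; the
+ * ring size is hypothetical and chosen as a power of two.
+ *
+ *	#define META_RING_SZ 4096
+ *	struct app_job meta[META_RING_SZ];
+ *
+ *	int idx = rte_dmadev_copy(dev_id, vchan, src, dst, len,
+ *				  RTE_DMA_OP_FLAG_SUBMIT);
+ *	if (idx >= 0)
+ *		meta[idx & (META_RING_SZ - 1)] = job;
+ */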
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using a VA as the DMA address.
+ * If the device supports SVA, the application could pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
+ * If the device does not support SVA, the application should pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
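+
+/* Example (illustrative, not part of the API): choosing the DMA address
+ * depending on SVA support; 'info' is assumed to come from
+ * rte_dmadev_info_get().
+ *
+ *	void *buf = rte_malloc(NULL, len, 0);
+ *	rte_iova_t addr = (info.dev_capa & RTE_DMADEV_CAPA_SVA) ?
+ *			(rte_iova_t)(uintptr_t)buf :
+ *			rte_malloc_virt2iova(buf);
+ */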
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, so as to leave a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ *
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
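+
+/* A minimal capability-query sketch (illustrative only; the device name
+ * "dma0" is an assumption, not part of this API):
+ *
+ * \code{.c}
+ *   struct rte_dmadev_info info;
+ *   int dev_id = rte_dmadev_get_dev_id("dma0");
+ *
+ *   if (dev_id < 0 || rte_dmadev_info_get(dev_id, &info) != 0)
+ *       rte_exit(EXIT_FAILURE, "no usable dmadev\n");
+ *   if (!(info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY))
+ *       rte_exit(EXIT_FAILURE, "device cannot perform copy ops\n");
+ * \endcode
+ */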
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode:
+	 * false for default mode, true for silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to device (the host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from device
+	 * (the host's memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (one host's memory) to another device (another host's
+	 * memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameters in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameters
+	 * in the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation succeeded; the value is the virtual DMA channel id,
+ *          which is always less than the field 'max_vchans' of struct
+ *          rte_dmadev_conf as configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
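+
+/* A typical bring-up sequence for the APIs above (a sketch only; error
+ * handling is trimmed, dev_id comes from the earlier query example, and the
+ * nb_desc value of 1024 is an assumption that must lie within the
+ * [min_desc, max_desc] range reported by rte_dmadev_info_get()):
+ *
+ * \code{.c}
+ *   struct rte_dmadev_conf dev_conf = { .max_vchans = 1 };
+ *   struct rte_dmadev_vchan_conf vconf = {
+ *       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *       .nb_desc = 1024,
+ *   };
+ *   int vchan;
+ *
+ *   rte_dmadev_configure(dev_id, &dev_conf);
+ *   vchan = rte_dmadev_vchan_setup(dev_id, &vconf);
+ *   rte_dmadev_start(dev_id);
+ * \endcode
+ */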
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which completed successfully. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMADEV_ALL_VCHAN, statistics of all channels are returned.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
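+
+/* A sketch of dumping the aggregate statistics (assumes <inttypes.h> for the
+ * PRIu64 format macros):
+ *
+ * \code{.c}
+ *   struct rte_dmadev_stats stats;
+ *
+ *   if (rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats) == 0)
+ *       printf("submitted=%" PRIu64 " completed=%" PRIu64
+ *              " failed=%" PRIu64 "\n", stats.submitted_count,
+ *              stats.completed_count, stats.completed_fail_count);
+ * \endcode
+ */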
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMADEV_ALL_VCHAN, statistics of all channels are reset.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user may modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * the job), which allows outstanding requests to complete as far as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch were not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed nonetheless, so the status of the not-attempted jobs is
+	 * reported before that of those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; this covers the case where only an address
+	 * error is known, but not which of the two addresses is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device/device-to-memory/
+	 * device-to-device transfer scenarios.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations have completed.
+ * If the specified DMA HW works in order (i.e. it has an implicit fence
+ * between operations), this flag may be a no-op.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must ring the doorbell to the
+ * hardware after the job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write data to the low-level cache.
+ * Used for performance optimization; this is only a hint and there is no
+ * capability bit for it, so a driver should not return an error if this flag
+ * is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is rung to begin
+ * the operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
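+
+/* An enqueue-and-ring sketch (src_iova/dst_iova are assumed to be valid DMA
+ * addresses for this device and vchan was returned by
+ * rte_dmadev_vchan_setup()):
+ *
+ * \code{.c}
+ *   int idx = rte_dmadev_copy(dev_id, vchan, src_iova, dst_iova, len,
+ *                             RTE_DMA_OP_FLAG_SUBMIT);
+ *   if (idx < 0)
+ *       return idx; // enqueue failed, e.g. the ring is full
+ * \endcode
+ */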
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is rung to begin the operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatter-gather list job.
+ *   - <0: Error code returned by the driver copy scatter-gather list function.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
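+
+/* A scatter-gather sketch gathering two source segments into one destination
+ * buffer (iova_a/iova_b/iova_c are assumed DMA addresses; nb_src and nb_dst
+ * must not exceed rte_dmadev_info::max_sges):
+ *
+ * \code{.c}
+ *   struct rte_dmadev_sge src[2] = {
+ *       { .addr = iova_a, .length = 64 },
+ *       { .addr = iova_b, .length = 64 },
+ *   };
+ *   struct rte_dmadev_sge dst[1] = {
+ *       { .addr = iova_c, .length = 128 },
+ *   };
+ *   int idx = rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1,
+ *                                RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode
+ */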
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is rung to begin
+ * the operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
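+
+/* A zero-fill sketch (clears an assumed 4 KiB buffer; the 64-bit pattern is
+ * replicated by hardware across the destination):
+ *
+ * \code{.c}
+ *   int idx = rte_dmadev_fill(dev_id, vchan, 0x0, dst_iova, 4096,
+ *                             RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode
+ */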
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
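+
+/* A batching sketch: enqueue several jobs without the SUBMIT flag, then ring
+ * the doorbell once (the burst size of 8 and the src/dst arrays are
+ * assumptions):
+ *
+ * \code{.c}
+ *   uint16_t i;
+ *
+ *   for (i = 0; i < 8; i++)
+ *       rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len, 0);
+ *   rte_dmadev_submit(dev_id, vchan);
+ * \endcode
+ */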
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there were transfer errors.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
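+
+/* A polling sketch (the burst size of 32 is an assumption; on error, switch
+ * to rte_dmadev_completed_status() to learn per-operation status codes):
+ *
+ * \code{.c}
+ *   uint16_t last_idx, n;
+ *   bool has_error;
+ *
+ *   n = rte_dmadev_completed(dev_id, vchan, 32, &last_idx, &has_error);
+ *   if (has_error)
+ *       recover_errors(dev_id, vchan); // hypothetical helper
+ * \endcode
+ */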
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; each
+ * operation's result may be success or failure.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number, n, is greater than zero, then the first n values in the
+ *   status array are also set.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
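+
+/* A sketch of per-operation status handling after an error was flagged (the
+ * array size of 32 is an assumption matching the poll burst above):
+ *
+ * \code{.c}
+ *   enum rte_dma_status_code status[32];
+ *   uint16_t last_idx, n, i;
+ *
+ *   n = rte_dmadev_completed_status(dev_id, vchan, 32, &last_idx, status);
+ *   for (i = 0; i < n; i++)
+ *       if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *           handle_failed_op(i, status[i]); // hypothetical helper
+ * \endcode
+ */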
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..02fffe3
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,25 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..a542c23 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v12 2/6] dmadev: introduce DMA device library internal header
  2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-07-29 13:06   ` Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-29 13:06 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library internal header, which contains
the internal data types that are used by the DMA devices in order to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 180 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 181 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85..f421ec1 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..599ab15
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return the number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
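+
+/* A driver would typically expose a single static ops table; a hypothetical
+ * "acme" PMD sketch (callback implementations elided):
+ *
+ * \code{.c}
+ *   static const struct rte_dmadev_ops acme_dmadev_ops = {
+ *       .dev_info_get = acme_info_get,
+ *       .dev_configure = acme_configure,
+ *       .dev_start = acme_start,
+ *       .dev_stop = acme_stop,
+ *       .dev_close = acme_close,
+ *       .vchan_setup = acme_vchan_setup,
+ *   };
+ * \endcode
+ */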
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from the primary process; it is used by secondary processes to get
+	 * the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data', which is pointed to by the
+	 * 'data' field.
+	 *
+	 * - In a secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of 'struct
+	 * rte_dmadev_data', which was initialized by the primary process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info which supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v12 3/6] dmadev: introduce DMA device library PMD header
  2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-07-29 13:06   ` Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-29 13:06 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library PMD header, which contains the
driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 +++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1..833baf7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index b6eb970..329e3a3 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -744,6 +744,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
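+
+/* A probe-time sketch for a hypothetical "acme" PMD (acme_dmadev_ops and
+ * priv are assumptions; see rte_dmadev_core.h for the struct fields):
+ *
+ * \code{.c}
+ *   struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ *
+ *   if (dev == NULL)
+ *       return -ENOMEM;
+ *   dev->dev_ops = &acme_dmadev_ops;
+ *   dev->dev_private = priv; // driver state, set in the primary process
+ * \endcode
+ */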
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 02fffe3..408b93c 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -23,3 +23,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+        global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v12 4/6] dmadev: introduce DMA device library implementation
  2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-07-29 13:06   ` Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 5/6] doc: add DMA device library guide Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 6/6] maintainers: add for dmadev Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-29 13:06 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library implementation, which includes
configuration of and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 563 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 676 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7..d2fc85e 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..b4f5498
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,563 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 329e3a3..f732b4c 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -800,9 +800,21 @@ struct rte_dmadev_sge {
  *   - <0: Error code returned by the driver copy function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -837,10 +849,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - <0: Error code returned by the driver copy scatter-gather list function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -871,9 +896,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - <0: Error code returned by the driver fill function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -894,8 +931,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   - <0: Failure to trigger hardware.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -921,9 +970,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as a parameter, then the compiler
+	 *   knows the value is NULL.
+	 * - If the address of a local variable is passed as a parameter, then
+	 *   the compiler knows it is non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
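+
+/* A sketch of a typical polling loop; BURST is an application-chosen batch
+ * size and the per-job bookkeeping is application-defined:
+ *
+ *	uint16_t last_idx;
+ *	bool has_error;
+ *	uint16_t nb = rte_dmadev_completed(dev_id, vchan, BURST,
+ *					   &last_idx, &has_error);
+ *	Here 'nb' jobs completed successfully; if 'has_error' is true, call
+ *	rte_dmadev_completed_status() to retrieve per-job status codes.
+ */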
 
 /**
  * @warning
@@ -953,10 +1030,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 599ab15..9272725 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -177,4 +177,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 408b93c..86c5e75 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -27,6 +27,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v12 5/6] doc: add DMA device library guide
  2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-07-29 13:06   ` Chengwen Feng
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 6/6] maintainers: add for dmadev Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-29 13:06 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds the dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/prog_guide/dmadev.rst | 147 +++++++++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst  |   1 +
 2 files changed, 148 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..e3ebb4d
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,147 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices, as well as a generic DMA API which allows
+DMA devices to be managed and configured, and which allows DMA operations to
+be provisioned on a DMA poll mode driver.
+
+The figure below outlines the model the DMA framework is built on:
+
+.. code-block:: console
+
+    +-------------+   +-------------+       +-------------+
+    | virtual DMA |   | virtual DMA |       | virtual DMA |
+    | channel     |   | channel     |       | channel     |
+    +-------------+   +-------------+       +-------------+
+           |                 |                     |
+           -------------------                     |
+                    |                              |
+              +----------+                    +----------+
+              |  dmadev  |                    |  dmadev  |
+              +----------+                    +----------+
+                    |                              |
+            +--------------+                +--------------+
+            | hardware DMA |                | hardware DMA |
+            | channel      |                | channel      |
+            +--------------+                +--------------+
+                    |                              |
+                    --------------------------------
+                                    |
+                             +--------------+
+                             | hardware DMA |
+                             | controller   |
+                             +--------------+
+
+ * The DMA controller may have multiple hardware DMA channels (aka. hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * A dmadev may create multiple virtual DMA channels, each representing a
+   different transfer context. DMA operation requests must be submitted to a
+   virtual DMA channel. e.g. An application could create virtual DMA channel 0
+   for memory-to-memory transfers, and virtual DMA channel 1 for
+   memory-to-device transfers.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration phase
+performed by EAL at DPDK initialization, based on their PCI device identifier:
+a unique PCI BDF (bus/bridge, device, function). Specific physical DMA
+controllers, like other physical devices in DPDK, can be listed using the EAL
+command line options.
+
+The dmadevs are then dynamically allocated by rte_dmadev_pmd_allocate() based
+on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The rte_dmadev_configure API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the maximum number of virtual DMA channels and
+an indication of whether to enable silent mode.
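+
+For example, a minimal configuration requesting one virtual DMA channel with
+silent mode left disabled might look as follows (``dev_id`` and ``ret`` are
+assumed to be declared by the application):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = {
+       .max_vchans = 1,
+       .enable_silent = false,
+   };
+   ret = rte_dmadev_configure(dev_id, &dev_conf);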
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rte_dmadev_vchan_setup API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, the source device
+access port parameters, and the destination device access port parameters.
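+
+For example, a memory-to-memory channel could be set up as below; the
+descriptor count of 1024 is illustrative and must lie within the
+``min_desc``/``max_desc`` range reported by ``rte_dmadev_info_get``:
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 1024,
+   };
+   vchan = rte_dmadev_vchan_setup(dev_id, &vchan_conf);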
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. To query the features
+supported by a PMD, use the ``rte_dmadev_info_get`` API, which returns the
+device info including its supported features.
+
+A special device capability is silent mode, in which the application is not
+required to invoke the dequeue APIs.
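+
+For example, an application can test a capability bit before relying on a
+feature:
+
+.. code-block:: c
+
+   struct rte_dmadev_info info;
+
+   if (rte_dmadev_info_get(dev_id, &info) == 0 &&
+       (info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY_SG)) {
+       /* scatter-gather copy is supported */
+   }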
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+The enqueue APIs include ``rte_dmadev_copy`` and ``rte_dmadev_fill``. If the
+enqueue is successful, a uint16_t ring_idx is returned. This ring_idx can be
+used by applications to track per-operation metadata in an application-defined
+circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue a doorbell to the hardware; the
+``RTE_DMA_OP_FLAG_SUBMIT`` flag of the enqueue APIs can do the same work.
+
+There are two dequeue APIs (``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``) that can be used to obtain the results of
+requests. The first API returns the number of operation requests completed
+successfully; the second API returns the number of operation requests
+completed, whether successful or failed, together with a meaningful status
+code for each. Both APIs also return the last completed operation's ring_idx,
+which helps to track the application-defined circular ring.
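+
+Putting these together, a minimal dataplane sketch (``src``, ``dst`` and
+``len`` prepared by the application, error handling elided) might be:
+
+.. code-block:: c
+
+   uint16_t last_idx, nb;
+   bool has_error;
+   enum rte_dma_status_code status[32];
+
+   /* enqueue one copy and ring the doorbell in the same call */
+   ret = rte_dmadev_copy(dev_id, vchan, src, dst, len,
+                         RTE_DMA_OP_FLAG_SUBMIT);
+
+   /* later, poll for up to 32 completions */
+   nb = rte_dmadev_completed(dev_id, vchan, 32, &last_idx, &has_error);
+   if (has_error)
+       nb = rte_dmadev_completed_status(dev_id, vchan, 32,
+                                        &last_idx, status);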
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v12 6/6] maintainers: add for dmadev
  2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 5/6] doc: add DMA device library guide Chengwen Feng
@ 2021-07-29 13:06   ` Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-07-29 13:06 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds Chengwen Feng as dmadev's maintainer.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                            | 5 +++++
 doc/guides/rel_notes/release_21_08.rst | 6 ++++++
 2 files changed, 11 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 8013ba1..84cfb1a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,11 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 16bb9ce..93068a2 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -175,6 +175,12 @@ New Features
   Updated testpmd application to log errors and warnings to stderr
   instead of stdout used before.
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provisioning of hardware and software DMA poll mode drivers, defining generic
+  APIs which support a number of different DMA operations.
+
 
 Removed Items
 -------------
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 2/2] doc: add dmadev library guide
  2021-07-29 11:02     ` Jerin Jacob
@ 2021-07-29 13:13       ` fengchengwen
  2021-07-29 13:28         ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-07-29 13:13 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On 2021/7/29 19:02, Jerin Jacob wrote:

[snip]

>> +
>> +The DMA library follows the same basic principles as those used in DPDK's
>> +Ethernet Device framework and the RegEx framework. The DMA framework provides
>> +a generic DMA device framework which supports both physical (hardware)
>> +and virtual (software) DMA devices as well as a generic DMA API which allows
>> +DMA devices to be managed and configured and supports DMA operations to be
>> +provisioned on DMA poll mode driver.
>> +
>> +Figure below outlines the model of the DMA framework built on:
>> +
>> +.. code-block:: console
>> +
>> +    +-------------+   +-------------+       +-------------+
>> +    | virtual DMA |   | virtual DMA |       | virtual DMA |
>> +    | channel     |   | channel     |       | channel     |
>> +    +-------------+   +-------------+       +-------------+
>> +           |                 |                     |
>> +           -------------------                     |
>> +                    |                              |
>> +              +----------+                    +----------+
>> +              |  dmadev  |                    |  dmadev  |
>> +              +----------+                    +----------+
>> +                    |                              |
>> +            +--------------+                +--------------+
>> +            | hardware DMA |                | hardware DMA |
>> +            | channel      |                | channel      |
>> +            +--------------+                +--------------+
>> +                    |                              |
>> +                    --------------------------------
>> +                                    |
>> +                             +--------------+
>> +                             | hardware DMA |
>> +                             | controller   |
>> +                             +--------------+
> 
> Please change to .svg file.
> See grep -ri "Inkscape" doc/guides/contributing/documentation.rst
> for guidelines.

Already sent v12 without this; could it be done later, after the merge?

[snip]

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 2/2] doc: add dmadev library guide
  2021-07-29 13:13       ` fengchengwen
@ 2021-07-29 13:28         ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-29 13:28 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

Sorry for the self-reply; adding an explanation inline.

On 2021/7/29 21:13, fengchengwen wrote:
> On 2021/7/29 19:02, Jerin Jacob wrote:
> 
> [snip]
> 
>>> +
>>> +The DMA library follows the same basic principles as those used in DPDK's
>>> +Ethernet Device framework and the RegEx framework. The DMA framework provides
>>> +a generic DMA device framework which supports both physical (hardware)
>>> +and virtual (software) DMA devices as well as a generic DMA API which allows
>>> +DMA devices to be managed and configured and supports DMA operations to be
>>> +provisioned on DMA poll mode driver.
>>> +
>>> +Figure below outlines the model of the DMA framework built on:
>>> +
>>> +.. code-block:: console
>>> +
>>> +    +-------------+   +-------------+       +-------------+
>>> +    | virtual DMA |   | virtual DMA |       | virtual DMA |
>>> +    | channel     |   | channel     |       | channel     |
>>> +    +-------------+   +-------------+       +-------------+
>>> +           |                 |                     |
>>> +           -------------------                     |
>>> +                    |                              |
>>> +              +----------+                    +----------+
>>> +              |  dmadev  |                    |  dmadev  |
>>> +              +----------+                    +----------+
>>> +                    |                              |
>>> +            +--------------+                +--------------+
>>> +            | hardware DMA |                | hardware DMA |
>>> +            | channel      |                | channel      |
>>> +            +--------------+                +--------------+
>>> +                    |                              |
>>> +                    --------------------------------
>>> +                                    |
>>> +                             +--------------+
>>> +                             | hardware DMA |
>>> +                             | controller   |
>>> +                             +--------------+
>>
>> Please change to .svg file.
>> See grep -ri "Inkscape" doc/guides/contributing/documentation.rst
>> for guidelines.
> 
> Already send v12 without this, could it do later after merge ?

I'm not familiar with "Inkscape" at the moment, and it may take some time to
modify it. So could we go with the above raw version first?

> 
> [snip]
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library
  2021-07-29 10:44         ` Jerin Jacob
@ 2021-07-29 13:30           ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-29 13:30 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Bruce Richardson, Thomas Monjalon, Ferruh Yigit, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

Thanks, comments inline.

On 2021/7/29 18:44, Jerin Jacob wrote:
> On Thu, Jul 29, 2021 at 6:56 AM fengchengwen <fengchengwen@huawei.com> wrote:
>>
>> Thanks, inline comment
>>
>> On 2021/7/28 19:13, Bruce Richardson wrote:
>>> On Tue, Jul 27, 2021 at 11:39:59AM +0800, Chengwen Feng wrote:
>>>> This patch introduce 'dmadevice' which is a generic type of DMA
>>>> device.
>>>>
>>>> The APIs of dmadev library exposes some generic operations which can
>>>> enable configuration and I/O with the DMA devices.
>>>>
>>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>>>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>>>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>>>> ---
>>>
>>> Thanks for this. Before it gets merged, I believe it needs to be split
>>> further into multiple patches (say 4 or so) rather than adding the whole
>>> lib in one go.
>>>
>>> Normally, I believe the split would be something like:
>>> * basic device structures and infrastructure e.g. alloc and release
>>>   functions
>>> * device config functions (and structures to go along with them)
>>>   such as configure and queue_setup
>>> * data plane functions
>>>
>>
>> I will try for it
>> Maybe one patch for public file, one for pmd header file, one for
>> implementation, and last for doc.
> 
> +1.
> 
>>
>>> Documentation would be included in each of the patches, rather than done as
>>> a block at the end.
>>>
>>> Besides that, I have one small additional requests for the API. Based off
>>> feedback for ioat driver, we added in the following function to that API,
>>> and we probably need something similar in dmadev:
>>>
>>> rte_ioat_burst_capacity()
>>>
>>> For our implementation this returns the number of elements that can be
>>> enqueued to the ring, at least for the current burst/batch of packets. We
>>> did the API this way because there can be additional limits beyond ring
>>> size on each individual burst beyond just the raw ring capacity, e.g. even
>>> if there are 4k ring elements free, there may be limits on the max burst
>>> size the hardware can do, or limits on the number of outstanding
>>> batches etc.
>>>
>>> Therefore can I request the addition of rte_dmadev_burst_capacity() [or
>>> something similarly named] to the basic dmadev API set. For most hardware,
>>> I think this will likely be the remaining free ring size, but I don't
>>> believe the API should commit to that. The use case it was added for was to
>>> enable an application which needs to do a multi-copy operation to check
>>> that all copies can fit or not before enqueuing the first one. This is
>>> important for hardware that doesn't have scatter-gather list support.
> 
> Yes. Could you add the following to enable scatter-gather support in
> rte_dmadev_info::sge_max
> /**<Maximum allowed number of scatter-gather entries in a single sg call. */
>         uint16_t sge_max;
> 

Already added max_sges; the naming (starting with max_) matches the existing fields.

> 
>>
>> Remaining capacity can be inferred by ring_idx which return from enqueue and
>> dequeue APIs.
>> So I don't think this API needs to be added.
>>
>> For scatter-gather list, there maybe a hardware limit for max src or dst entry
>> size, I prefer add 'max_sges' filed in struct rte_dmadev_info to indicate it.
>>
>>>
>>> /Bruce
>>>
>>> PS: One typo in code flagged below too.
>>>
>>> <snip>
>>>> + */
>>>> +enum rte_dma_status_code {
>>>> +    RTE_DMA_STATUS_SUCCESSFUL,
>>>> +    /**< The operation completed successfully. */
>>>> +    RTE_DMA_STATUS_USRER_ABORT,
>>>
>>>               Typo here ^^^
>>>
>>
>> OK, USRER->USER will fix later
>>
>>> .
>>>
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library
  2021-07-29  9:15         ` Bruce Richardson
@ 2021-07-29 13:33           ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-07-29 13:33 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

Thanks, comments inline.

On 2021/7/29 17:15, Bruce Richardson wrote:
> On Thu, Jul 29, 2021 at 09:26:31AM +0800, fengchengwen wrote:
>> Thanks, inline comment
>>
>> On 2021/7/28 19:13, Bruce Richardson wrote:
>>> On Tue, Jul 27, 2021 at 11:39:59AM +0800, Chengwen Feng wrote:
>>>> This patch introduce 'dmadevice' which is a generic type of DMA
>>>> device.
>>>>
>>>> The APIs of dmadev library exposes some generic operations which can
>>>> enable configuration and I/O with the DMA devices.
>>>>
>>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>>>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>>>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>>>> ---
>>>
>>> Thanks for this. Before it gets merged, I believe it needs to be split
>>> further into multiple patches (say 4 or so) rather than adding the whole
>>> lib in one go.
>>>
>>> Normally, I believe the split would be something like:
>>> * basic device structures and infrastructure e.g. alloc and release
>>>   functions
>>> * device config functions (and structures to go along with them)
>>>   such as configure and queue_setup
>>> * data plane functions
>>>
>>
>> I will try for it
>> Maybe one patch for public file, one for pmd header file, one for
>> implementation, and last for doc.
>>
>>> Documentation would be included in each of the patches, rather than done as
>>> a block at the end.
>>>
>>> Besides that, I have one small additional requests for the API. Based off
>>> feedback for ioat driver, we added in the following function to that API,
>>> and we probably need something similar in dmadev:
>>>
>>> rte_ioat_burst_capacity()
>>>
>>> For our implementation this returns the number of elements that can be
>>> enqueued to the ring, at least for the current burst/batch of packets. We
>>> did the API this way because there can be additional limits beyond ring
>>> size on each individual burst beyond just the raw ring capacity, e.g. even
>>> if there are 4k ring elements free, there may be limits on the max burst
>>> size the hardware can do, or limits on the number of outstanding
>>> batches etc.
>>>
>>> Therefore can I request the addition of rte_dmadev_burst_capacity() [or
>>> something similarly named] to the basic dmadev API set. For most hardware,
>>> I think this will likely be the remaining free ring size, but I don't
>>> believe the API should commit to that. The use case it was added for was to
>>> enable an application which needs to do a multi-copy operation to check
>>> that all copies can fit or not before enqueuing the first one. This is
>>> important for hardware that doesn't have scatter-gather list support.
>>
>> Remaining capacity can be inferred by ring_idx which return from enqueue and
>> dequeue APIs.
>> So I don't think this API needs to be added.
>>
> 
> Yes, the app can always track it itself, but I still see value in adding
> this API. However, so long as you are open to having it added later, it
> doesn't matter if it's not present in the first versions of this API merged
> in.

Got it, I think we could discuss later when we adapt more drivers.

> 
> /Bruce
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v13 0/6] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (15 preceding siblings ...)
  2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
@ 2021-08-03 11:29 ` Chengwen Feng
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (6 more replies)
  2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
                   ` (12 subsequent siblings)
  29 siblings, 7 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-03 11:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch set contains six patches adding the new dmadev library.

Chengwen Feng (6):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  maintainers: add for dmadev

---
v13:
* add dmadev_i1.svg.
* delete one unnecessary comment line of rte_dmadev_info_get.
v12:
* add max_sges field to struct rte_dmadev_info.
* add more description to dmadev.rst.
* replace scatter with scatter gather in code comments.
* split to six patch.
* fix typo.
v11:
* rename RTE_DMA_STATUS_UNKNOWN to RTE_DMA_STATUS_ERROR_UNKNOWN.
* add RTE_DMA_STATUS_INVALID_ADDR macro.
* update release-note.
* add acked-by for 1/2 patch.
* add dmadev programming guide which is 2/2 patch.
v10:
* fix rte_dmadev_completed_status comment.

 MAINTAINERS                             |    5 +
 config/rte_config.h                     |    3 +
 doc/api/doxy-api-index.md               |    1 +
 doc/api/doxy-api.conf.in                |    1 +
 doc/guides/prog_guide/dmadev.rst        |  126 ++++
 doc/guides/prog_guide/img/dmadev_i1.svg |  278 ++++++++
 doc/guides/prog_guide/index.rst         |    1 +
 doc/guides/rel_notes/release_21_08.rst  |    6 +
 lib/dmadev/meson.build                  |    7 +
 lib/dmadev/rte_dmadev.c                 |  563 ++++++++++++++++
 lib/dmadev/rte_dmadev.h                 | 1058 +++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h            |  182 ++++++
 lib/dmadev/rte_dmadev_pmd.h             |   72 +++
 lib/dmadev/version.map                  |   36 ++
 lib/meson.build                         |    1 +
 15 files changed, 2340 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev_i1.svg
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v13 1/6] dmadev: introduce DMA device library public APIs
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
@ 2021-08-03 11:29   ` Chengwen Feng
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-03 11:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs, which expose generic
operations that can enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
---
 doc/api/doxy-api-index.md |   1 +
 doc/api/doxy-api.conf.in  |   1 +
 lib/dmadev/meson.build    |   4 +
 lib/dmadev/rte_dmadev.h   | 962 ++++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map    |  25 ++
 lib/meson.build           |   1 +
 6 files changed, 994 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..a44a92b 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..6d5bd85
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..1090b06
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,962 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller may have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * A dmadev may create multiple virtual DMA channels, each representing a
+ * different transfer context. The DMA operation request must be submitted to
+ * a virtual DMA channel. e.g. An application could create virtual DMA channel
+ * 0 for the memory-to-memory transfer scenario, and virtual DMA channel 1 for
+ * the memory-to-device transfer scenario.
+ *
+ * The dmadevs are dynamically allocated by rte_dmadev_pmd_allocate() during
+ * the PCI/SoC device probing phase performed at EAL initialization time, and
+ * can be released by rte_dmadev_pmd_release() during the PCI/SoC device
+ * removal phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; the flags
+ * parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can do the
+ * same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * application does not invoke the above two completed APIs.
+ *
+ * About the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero, after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
+ * The DMA operation addresses used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) are defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the given dev_id is a valid DMA device index.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using a VA as the DMA address.
+ * If the device supports SVA, then the application can pass any VA address,
+ * e.g. memory from rte_malloc(), rte_memzone(), malloc or stack memory.
+ * If the device does not support SVA, then the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, so as to leave a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
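+
+/* A short usage sketch: query the device limits before configuring, assuming
+ * dev_id is a valid device index:
+ *
+ *	struct rte_dmadev_info info;
+ *	if (rte_dmadev_info_get(dev_id, &info) == 0) {
+ *		... use info.max_vchans / info.min_desc / info.max_desc ...
+ *	}
+ */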
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels to use.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false-default mode, true-silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *  - =0: Successfully close device
+ *  - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it can initiate a DMA move request from memory
+	 * (which is SoC memory) to a device (which is host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it can initiate a DMA move request from a device
+	 * (which is host memory) to memory (which is SoC memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it can initiate a DMA move request from a device
+	 * (which is host memory) to another device (which is another host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The pasid field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptor for the virtual DMA channel */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   - >=0: Allocation succeeded; the value is the virtual DMA channel id. It
+ *          is less than the field 'max_vchans' of struct rte_dmadev_conf
+ *          configured by rte_dmadev_configure().
+ *   - <0: Error code returned by the driver virtual channel setup function.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which successfully complete. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
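+
+/* A short usage sketch: read, then clear, the statistics of all virtual DMA
+ * channels of a device (return values assumed to be checked by the caller):
+ *
+ *	struct rte_dmadev_stats stats;
+ *	(void)rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+ *	(void)rte_dmadev_stats_reset(dev_id, RTE_DMADEV_ALL_VCHAN);
+ */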
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user may modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job), which allows outstanding requests to complete as much as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed, though; in that case, report the status of the not
+	 * attempted jobs before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address. This covers the case where only an address
+	 * error is known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device/
+	 * device-to-memory/device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
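+
+/* A sketch of inspecting per-job status after an error is signalled; BURST is
+ * an application-chosen batch size and handle_failed_job() is a hypothetical
+ * application helper:
+ *
+ *	enum rte_dma_status_code status[BURST];
+ *	uint16_t last_idx, i;
+ *	uint16_t nb = rte_dmadev_completed_status(dev_id, vchan, BURST,
+ *						  &last_idx, status);
+ *	for (i = 0; i < nb; i++)
+ *		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *			handle_failed_job(i, status[i]);
+ */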
+
+/**
+ * rte_dmadev_sge - a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * An operation with this flag set must be processed only after all previous
+ * operations have completed.
+ * If the given DMA HW works in-order (i.e. it has a default fence between
+ * operations), this flag may be a NOP.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * An operation with this flag set must issue the doorbell to hardware after
+ * the job has been enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write DMA data to the low-level cache.
+ * Used for performance optimization. This is only a hint, and there is no
+ * capability bit for it; a driver should not return an error if this flag
+ * is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
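+
+/* Illustrative sketch only, not part of the dmadev API: enqueue a single
+ * copy and ring the doorbell in the same call by passing
+ * RTE_DMA_OP_FLAG_SUBMIT. The wrapper name and its parameters are
+ * hypothetical, chosen just for this example.
+ */
+static inline int
+example_copy_and_submit(uint16_t dev_id, uint16_t vchan,
+			rte_iova_t src, rte_iova_t dst, uint32_t len)
+{
+	/* A negative return value means the enqueue failed (e.g. the ring is
+	 * full); otherwise it is the ring_idx of the enqueued job.
+	 */
+	return rte_dmadev_copy(dev_id, vchan, src, dst, len,
+			       RTE_DMA_OP_FLAG_SUBMIT);
+}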
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is triggered to begin this operation; otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   Pointer to the source scatter-gather entry array.
+ * @param dst
+ *   Pointer to the destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatter-gather list job.
+ *   - <0: Error code returned by the driver copy scatter-gather list function.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
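+
+/* Illustrative sketch only, not part of the dmadev API: build one-entry
+ * source and destination scatter-gather lists and enqueue them without the
+ * submit flag, so that several jobs can be batched before one doorbell.
+ * The wrapper name is hypothetical, chosen just for this example.
+ */
+static inline int
+example_copy_sg_one(uint16_t dev_id, uint16_t vchan,
+		    rte_iova_t src, rte_iova_t dst, uint32_t len)
+{
+	struct rte_dmadev_sge src_sge = { .addr = src, .length = len };
+	struct rte_dmadev_sge dst_sge = { .addr = dst, .length = len };
+
+	/* No RTE_DMA_OP_FLAG_SUBMIT: the caller is expected to ring the
+	 * doorbell later, once, via rte_dmadev_submit().
+	 */
+	return rte_dmadev_copy_sg(dev_id, vchan, &src_sge, &dst_sge, 1, 1, 0);
+}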
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there were transfer errors.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
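+
+/* Illustrative sketch only, not part of the dmadev API: poll for up to a
+ * burst of completions. The wrapper name is hypothetical, chosen just for
+ * this example.
+ */
+static inline uint16_t
+example_poll_completions(uint16_t dev_id, uint16_t vchan, uint16_t burst)
+{
+	uint16_t last_idx;
+	bool has_error;
+	uint16_t nb_done;
+
+	nb_done = rte_dmadev_completed(dev_id, vchan, burst, &last_idx,
+				       &has_error);
+	if (has_error) {
+		/* Some job failed: a caller would typically switch to
+		 * rte_dmadev_completed_status() to get per-job status codes.
+		 */
+	}
+	return nb_done;
+}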
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; the completed
+ * operations may have succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number n is greater than zero, then the first n values in the
+ *   status array are also set.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
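+
+/* Illustrative sketch only, not part of the dmadev API: drain completions
+ * with per-job status codes, e.g. after rte_dmadev_completed() reported an
+ * error. The wrapper name and the burst size of 8 are hypothetical.
+ */
+static inline void
+example_drain_with_status(uint16_t dev_id, uint16_t vchan)
+{
+	enum rte_dma_status_code status[8];
+	uint16_t last_idx;
+	uint16_t nb_done, i;
+
+	nb_done = rte_dmadev_completed_status(dev_id, vchan, 8, &last_idx,
+					      status);
+	for (i = 0; i < nb_done; i++) {
+		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL) {
+			/* e.g. log the failure and recycle the buffers that
+			 * were tracked under this job's ring_idx.
+			 */
+		}
+	}
+}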
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..02fffe3
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,25 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..a542c23 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v13 2/6] dmadev: introduce DMA device library internal header
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-08-03 11:29   ` Chengwen Feng
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-03 11:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library internal header, which contains
internal data types that are used by the DMA devices in order to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 180 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 181 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85..f421ec1 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..599ab15
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return the number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from the primary process; it is used by the secondary process to
+	 * get the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field, and copy its value to the 'dev_private'
+	 * field of the 'struct rte_dmadev_data' pointed to by the 'data'
+	 * field.
+	 *
+	 * - In a secondary process, the dmadev framework initializes this
+	 * field by copying from the 'dev_private' field of the
+	 * 'struct rte_dmadev_data' initialized by the primary process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v13 3/6] dmadev: introduce DMA device library PMD header
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-08-03 11:29   ` Chengwen Feng
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-03 11:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library PMD header, which provides
the driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 +++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1..833baf7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 1090b06..439ad95 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -743,6 +743,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
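+
+/* Illustrative sketch only, not part of this patch: the rough shape of a PMD
+ * probe routine using the allocation API above. The example_* names and the
+ * caller-supplied private data and ops table are hypothetical placeholders,
+ * which is why the block is kept under '#if 0'.
+ */
+#if 0
+static int
+example_pmd_probe(const char *name, void *priv,
+		  const struct rte_dmadev_ops *ops)
+{
+	struct rte_dmadev *dev;
+
+	dev = rte_dmadev_pmd_allocate(name);
+	if (dev == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = priv;	/* PMD-specific private data */
+	dev->dev_ops = ops;		/* ops table defined by the PMD */
+	return 0;
+}
+#endif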
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 02fffe3..408b93c 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -23,3 +23,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+        global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v13 4/6] dmadev: introduce DMA device library implementation
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-08-03 11:29   ` Chengwen Feng
  2021-08-05 12:56     ` Walsh, Conor
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 5/6] doc: add DMA device library guide Chengwen Feng
                     ` (2 subsequent siblings)
  6 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-08-03 11:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library implementation, which
includes configuration of and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 563 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 676 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7..d2fc85e 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..b4f5498
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,563 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->max_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.max_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 439ad95..a6fbce6 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -799,9 +799,21 @@ struct rte_dmadev_sge {
  *   - <0: Error code returned by the driver copy function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -836,10 +848,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - <0: Error code returned by the driver copy scatter-gather list function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -870,9 +895,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - <0: Error code returned by the driver fill function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -893,8 +930,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   - <0: Failure to trigger hardware.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -920,9 +969,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-NULL to simplify drivers.
+	 * In most cases these checks should be compile-time evaluated, since
+	 * this is an inline function.
+	 * - If NULL is explicitly passed as a parameter, the compiler knows
+	 *   the value is NULL.
+	 * - If the address of a local variable is passed as a parameter, the
+	 *   compiler knows it is non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -952,10 +1029,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.max_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 599ab15..9272725 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -177,4 +177,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 408b93c..86c5e75 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -27,6 +27,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v13 5/6] doc: add DMA device library guide
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-08-03 11:29   ` Chengwen Feng
  2021-08-03 14:55     ` Jerin Jacob
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 6/6] maintainers: add for dmadev Chengwen Feng
  2021-08-03 11:46   ` [dpdk-dev] [PATCH v13 0/6] support dmadev fengchengwen
  6 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-08-03 11:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds the dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/prog_guide/dmadev.rst        | 126 +++++++++++++++
 doc/guides/prog_guide/img/dmadev_i1.svg | 278 ++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst         |   1 +
 3 files changed, 405 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev_i1.svg

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..c6327db
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+====================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices, as well as a generic DMA API which allows
+DMA devices to be managed and configured, and which supports provisioning of
+DMA operations on a DMA poll mode driver.
+
+.. _figure_dmadev_i1:
+
+.. figure:: img/dmadev_i1.*
+
+   The model on which the DMA framework is built
+
+ * A DMA controller may have multiple hardware DMA channels (aka hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * A dmadev may create multiple virtual DMA channels; each virtual DMA
+   channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. e.g. an application could
+   create virtual DMA channel 0 for the memory-to-memory transfer scenario,
+   and virtual DMA channel 1 for the memory-to-device transfer scenario.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI device
+identifier, each unique PCI BDF (bus/bridge, device, function). Specific
+physical DMA controllers, like other physical devices in DPDK, can be listed
+using the EAL command line options.
+
+Dmadevs are then dynamically allocated by rte_dmadev_pmd_allocate(), based on
+the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The rte_dmadev_configure API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the maximum number of virtual DMA channels
+and an indication of whether to enable silent mode.
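+
+A minimal sketch of this call, assuming a device that supports at least one
+virtual DMA channel (the ``dev_id`` and field values shown are examples only):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = {
+       .max_vchans = 1,        /* use a single virtual DMA channel */
+       .enable_silent = false, /* completions are dequeued explicitly */
+   };
+
+   if (rte_dmadev_configure(dev_id, &dev_conf) != 0)
+       rte_exit(EXIT_FAILURE, "cannot configure dmadev %u\n", dev_id);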
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rte_dmadev_vchan_setup API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, the source device
+access port parameters, and the destination device access port parameters.
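+
+A minimal sketch of setting up a virtual DMA channel for memory-to-memory
+copies. The descriptor count is an example value that must lie within the
+min_desc/max_desc range reported by ``rte_dmadev_info_get``, and the
+zero-initialized port fields are assumed to correspond to
+``RTE_DMADEV_PORT_NONE``, as required for this direction:
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 1024,
+   };
+
+   if (rte_dmadev_vchan_setup(dev_id, &vchan_conf) != 0)
+       rte_exit(EXIT_FAILURE, "cannot set up vchan on dmadev %u\n", dev_id);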
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get``
+API can be used to get the supported PMD features; it returns the info of the
+device and its supported features.
+
+A special device capability is silent mode, in which the application is not
+required to invoke the dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+The enqueue APIs include ``rte_dmadev_copy`` and ``rte_dmadev_fill``; if the
+enqueue is successful, a uint16_t ring_idx is returned. This ring_idx can be
+used by applications to track per-operation metadata in an application-defined
+circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue the doorbell to hardware;
+alternatively, passing the ``RTE_DMA_OP_FLAG_SUBMIT`` flag to the enqueue APIs
+does the same work.
+
+There are two dequeue APIs (``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``) which can be used to obtain the results of
+requests. The first API returns the number of operation requests completed
+successfully; the second returns the number of operation requests completed,
+whether successful or failed, along with a meaningful status code for each.
+Both APIs can also return the last completed operation's ring_idx, which helps
+to track the application-defined circular ring, as sketched below.
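+
+A minimal end-to-end sketch, assuming valid ``src_iova``/``dst_iova`` IOVAs
+and an already configured device and virtual channel (error handling trimmed
+for brevity):
+
+.. code-block:: c
+
+   uint16_t last_idx;
+   bool has_error = false;
+   uint16_t nb_done = 0;
+
+   /* Enqueue a copy and ring the doorbell in the same call. */
+   int ring_idx = rte_dmadev_copy(dev_id, vchan, src_iova, dst_iova, len,
+                                  RTE_DMA_OP_FLAG_SUBMIT);
+   if (ring_idx < 0)
+       rte_exit(EXIT_FAILURE, "copy enqueue failed\n");
+
+   /* Busy-poll until the operation completes or an error is reported. */
+   while (nb_done == 0 && !has_error)
+       nb_done = rte_dmadev_completed(dev_id, vchan, 1, &last_idx,
+                                      &has_error);
+
+   if (has_error)
+       /* rte_dmadev_completed_status() would give the status code here. */
+       rte_exit(EXIT_FAILURE, "copy failed\n");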
diff --git a/doc/guides/prog_guide/img/dmadev_i1.svg b/doc/guides/prog_guide/img/dmadev_i1.svg
new file mode 100644
index 0000000..b305beb
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev_i1.svg
@@ -0,0 +1,278 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   width="206.19344mm"
+   height="168.97479mm"
+   viewBox="0 0 206.19344 168.97479"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev_i1.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="0.66635802"
+     inkscape:cx="396.93377"
+     inkscape:cy="480.22233"
+     inkscape:window-width="1920"
+     inkscape:window-height="1017"
+     inkscape:window-x="1914"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1277">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1279">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.820271"
+       y="21.69492"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(11.570899,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1281">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1283">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="150.74168"
+       y="21.694923"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(101.49231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1285">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1287">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="35.854393"
+       y="79.215172"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,72.893551)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1289">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="24.763887"
+       y="117.93796"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,111.61634)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1291">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1293">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="145.42947"
+       y="117.74998"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(96.180071,111.42836)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1295">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1297">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="87.923386"
+       y="165.88565"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(38.674008,159.56408)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1299">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1301">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="156.87534"
+       y="79.215179"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(107.62597,72.893552)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1303">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 77.744878,49.69492 60.71719,79.215172"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 32.952235,49.527306 16.56935,29.687866"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 55.072222,98.944286 0.08673,18.993674"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 176.00783,98.944294 -0.0768,18.805686"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 161.17651,142.36643 -28.09763,23.51922"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 71.42827,142.55441 30.73327,23.33124"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 175.82205,49.694923 0.16945,29.520256"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v13 6/6] maintainers: add for dmadev
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 5/6] doc: add DMA device library guide Chengwen Feng
@ 2021-08-03 11:29   ` Chengwen Feng
  2021-08-03 11:46   ` [dpdk-dev] [PATCH v13 0/6] support dmadev fengchengwen
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-03 11:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds Chengwen Feng as the dmadev maintainer.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                            | 5 +++++
 doc/guides/rel_notes/release_21_08.rst | 6 ++++++
 2 files changed, 11 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 8013ba1..84cfb1a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,11 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/guides/rel_notes/release_21_08.rst b/doc/guides/rel_notes/release_21_08.rst
index 16bb9ce..93068a2 100644
--- a/doc/guides/rel_notes/release_21_08.rst
+++ b/doc/guides/rel_notes/release_21_08.rst
@@ -175,6 +175,12 @@ New Features
   Updated testpmd application to log errors and warnings to stderr
   instead of stdout used before.
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provisioning of hardware and software DMA poll mode drivers, defining generic
+  APIs which support a number of different DMA operations.
+
 
 Removed Items
 -------------
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread
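
As a reader's aid, here is a minimal hedged sketch of the management API this
release note refers to, built only from names quoted elsewhere in this thread
(RTE_DMADEV_MAX_DEVS, rte_dmadev_is_valid_dev(), rte_dmadev_info_get()); it is
an illustration, not part of the patch:

/* Hedged sketch: enumerate attached dmadevs and print their capabilities. */
#include <inttypes.h>
#include <stdio.h>
#include <rte_dmadev.h>

static void
dump_dmadevs(void)
{
	uint16_t dev_id;
	struct rte_dmadev_info info;

	for (dev_id = 0; dev_id < RTE_DMADEV_MAX_DEVS; dev_id++) {
		if (!rte_dmadev_is_valid_dev(dev_id))
			continue;
		if (rte_dmadev_info_get(dev_id, &info) != 0)
			continue;
		printf("dmadev %u: capa=0x%" PRIx64 " max_vchans=%u\n",
		       dev_id, info.dev_capa, info.max_vchans);
	}
}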

* Re: [dpdk-dev] [PATCH v13 0/6] support dmadev
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 6/6] maintainers: add for dmadev Chengwen Feng
@ 2021-08-03 11:46   ` fengchengwen
  6 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-08-03 11:46 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

@Bruce @Jerin @Morten

Could you please review the 'doc: add DMA device library guide' patch?
PS: the other patches are already well reviewed.

Thanks

On 2021/8/3 19:29, Chengwen Feng wrote:
> This patch set contains six patches for the new dmadev library.
> 
> Chengwen Feng (6):
>   dmadev: introduce DMA device library public APIs
>   dmadev: introduce DMA device library internal header
>   dmadev: introduce DMA device library PMD header
>   dmadev: introduce DMA device library implementation
>   doc: add DMA device library guide
>   maintainers: add for dmadev
> 
> ---
> v13:
> * add dmadev_i1.svg.
> * delete one unnecessary comment line of rte_dmadev_info_get.
> v12:
> * add max_sges field to struct rte_dmadev_info.
> * add more description to dmadev.rst.
> * replace scatter with scatter gather in code comments.
> * split into six patches.
> * fix typo.
> v11:
> * rename RTE_DMA_STATUS_UNKNOWN to RTE_DMA_STATUS_ERROR_UNKNOWN.
> * add RTE_DMA_STATUS_INVALID_ADDR marco.
> * update release-note.
> * add acked-by for 1/2 patch.
> * add dmadev programming guide which is 2/2 patch.
> v10:
> * fix rte_dmadev_completed_status comment.
> 
>  MAINTAINERS                             |    5 +
>  config/rte_config.h                     |    3 +
>  doc/api/doxy-api-index.md               |    1 +
>  doc/api/doxy-api.conf.in                |    1 +
>  doc/guides/prog_guide/dmadev.rst        |  126 ++++
>  doc/guides/prog_guide/img/dmadev_i1.svg |  278 ++++++++
>  doc/guides/prog_guide/index.rst         |    1 +
>  doc/guides/rel_notes/release_21_08.rst  |    6 +
>  lib/dmadev/meson.build                  |    7 +
>  lib/dmadev/rte_dmadev.c                 |  563 ++++++++++++++++
>  lib/dmadev/rte_dmadev.h                 | 1058 +++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h            |  182 ++++++
>  lib/dmadev/rte_dmadev_pmd.h             |   72 +++
>  lib/dmadev/version.map                  |   36 ++
>  lib/meson.build                         |    1 +
>  15 files changed, 2340 insertions(+)
>  create mode 100644 doc/guides/prog_guide/dmadev.rst
>  create mode 100644 doc/guides/prog_guide/img/dmadev_i1.svg
>  create mode 100644 lib/dmadev/meson.build
>  create mode 100644 lib/dmadev/rte_dmadev.c
>  create mode 100644 lib/dmadev/rte_dmadev.h
>  create mode 100644 lib/dmadev/rte_dmadev_core.h
>  create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>  create mode 100644 lib/dmadev/version.map
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v13 5/6] doc: add DMA device library guide
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 5/6] doc: add DMA device library guide Chengwen Feng
@ 2021-08-03 14:55     ` Jerin Jacob
  2021-08-05 13:15       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Jerin Jacob @ 2021-08-03 14:55 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On Tue, Aug 3, 2021 at 5:03 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch adds dmadev library guide.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
>  doc/guides/prog_guide/dmadev.rst        | 126 +++++++++++++++


The doc build has the following warning on my machine:

ninja: Entering directory `build'
[2789/2813] Generating html_guides with a custom command
/export/dpdk.org/doc/guides/prog_guide/dmadev.rst:24: WARNING: Figure
caption must be a paragraph or empty comment.

.. figure:: img/dmadev_i1.*

   The model of the DMA framework built on

 * The DMA controller could have multiple hardware DMA channels (aka. hardware
   DMA queues), each hardware DMA channel should be represented by a dmadev.
 * The dmadev could create multiple virtual DMA channels, each virtual DMA
   channel represents a different transfer context. The DMA operation request
   must be submitted to the virtual DMA channel. e.g. Application could create
   virtual DMA channel 0 for memory-to-memory transfer scenario, and create
   virtual DMA channel 1 for memory-to-device transfer scenario.
[2813/2813] Linking target app/dpdk-test-pipeline
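
To make the quoted model concrete, a minimal sketch against the v13 API in
this series (hedged illustration: nb_desc is a placeholder, zero-initialised
port fields are assumed to mean RTE_DMADEV_PORT_NONE, and error handling is
trimmed):

#include <rte_dmadev.h>

static int
setup_two_vchans(uint16_t dev_id)
{
	struct rte_dmadev_conf dev_conf = { .max_vchans = 2 };
	struct rte_dmadev_vchan_conf vconf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,	/* assumed within [min_desc, max_desc] */
	};

	if (rte_dmadev_configure(dev_id, &dev_conf) != 0)
		return -1;
	/* vchan 0: one memory-to-memory transfer context. */
	if (rte_dmadev_vchan_setup(dev_id, &vconf) != 0)
		return -1;
	/* vchan 1: a second, independent context; a real mem-to-dev vchan
	 * would additionally need vconf.dst_port filled in. */
	if (rte_dmadev_vchan_setup(dev_id, &vconf) != 0)
		return -1;

	return rte_dmadev_start(dev_id);
}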

> new file mode 100644
> index 0000000..b305beb
> --- /dev/null
> +++ b/doc/guides/prog_guide/img/dmadev_i1.svg

Why the _i1 suffix in the name?


> @@ -0,0 +1,278 @@
> +<?xml version="1.0" encoding="UTF-8" standalone="no"?>
> +<!-- Created with Inkscape (http://www.inkscape.org/) -->

You could add an SPDX license and your company copyright as well.
See other .svg files.
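
For example, the other SVGs typically open with comments along these lines
(holder and year here are illustrative):

<!-- SPDX-License-Identifier: BSD-3-Clause -->
<!-- Copyright(c) 2021 HiSilicon Limited -->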


Rest looks good to me.


> +
> +<svg
> +   width="206.19344mm"
> +   height="168.97479mm"
> +   viewBox="0 0 206.19344 168.97479"
> +   version="1.1"
> +   id="svg934"
> +   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
> +   sodipodi:docname="dmadev_i1.svg"
> +   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
> +   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
> +   xmlns="http://www.w3.org/2000/svg"
> +   xmlns:svg="http://www.w3.org/2000/svg">
> +  <sodipodi:namedview
> +     id="namedview936"
> +     pagecolor="#ffffff"
> +     bordercolor="#666666"
> +     borderopacity="1.0"
> +     inkscape:pageshadow="2"
> +     inkscape:pageopacity="0.0"
> +     inkscape:pagecheckerboard="0"
> +     inkscape:document-units="mm"
> +     showgrid="false"
> +     fit-margin-top="0"
> +     fit-margin-left="0"
> +     fit-margin-right="0"
> +     fit-margin-bottom="0"
> +     inkscape:showpageshadow="false"
> +     inkscape:zoom="0.66635802"
> +     inkscape:cx="396.93377"
> +     inkscape:cy="480.22233"
> +     inkscape:window-width="1920"
> +     inkscape:window-height="1017"
> +     inkscape:window-x="1914"
> +     inkscape:window-y="-8"
> +     inkscape:window-maximized="1"
> +     inkscape:current-layer="layer1" />
> +  <defs
> +     id="defs931">
> +    <rect
> +       x="342.43954"
> +       y="106.56832"
> +       width="58.257381"
> +       height="137.82834"
> +       id="rect17873" />
> +  </defs>
> +  <g
> +     inkscape:label="Layer 1"
> +     inkscape:groupmode="layer"
> +     id="layer1"
> +     transform="translate(-0.13857517,-21.527306)">
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
> +       id="rect31-9"
> +       width="50"
> +       height="28"
> +       x="0.13857517"
> +       y="21.527306"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1"
> +       transform="translate(-49.110795,15.205683)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1277">virtual DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1279">channel</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
> +       id="rect31-9-5"
> +       width="50"
> +       height="28"
> +       x="60.820271"
> +       y="21.69492"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4"
> +       transform="translate(11.570899,15.373298)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1281">virtual DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1283">channel</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
> +       id="rect31-9-5-3"
> +       width="50"
> +       height="28"
> +       x="150.74168"
> +       y="21.694923"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-8"
> +       transform="translate(101.49231,15.373299)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1285">virtual DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1287">channel</tspan></text>
> +    <text
> +       xml:space="preserve"
> +       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
> +       id="text17871"
> +       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
> +       id="rect31-9-5-8"
> +       width="38.34557"
> +       height="19.729115"
> +       x="35.854393"
> +       y="79.215172"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3"
> +       transform="translate(-13.394978,72.893551)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1289">dmadev</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
> +       id="rect31-9-5-8-0"
> +       width="60.902534"
> +       height="24.616455"
> +       x="24.763887"
> +       y="117.93796"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3-76"
> +       transform="translate(-24.485484,111.61634)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1291">hardware DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1293">channel</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
> +       id="rect31-9-5-8-0-6"
> +       width="60.902534"
> +       height="24.616455"
> +       x="145.42947"
> +       y="117.74998"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3-76-7"
> +       transform="translate(96.180071,111.42836)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1295">hardware DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1297">channel</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
> +       id="rect31-9-5-8-0-4"
> +       width="60.902534"
> +       height="24.616455"
> +       x="87.923386"
> +       y="165.88565"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3-76-4"
> +       transform="translate(38.674008,159.56408)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1299">hardware DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1301">controller</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
> +       id="rect31-9-5-8-5"
> +       width="38.34557"
> +       height="19.729115"
> +       x="156.87534"
> +       y="79.215179"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3-7"
> +       transform="translate(107.62597,72.893552)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1303">dmadev</tspan></text>
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="M 77.744878,49.69492 60.71719,79.215172"
> +       id="path45308"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5"
> +       inkscape:connection-end="#rect31-9-5-8" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 32.952235,49.527306 16.56935,29.687866"
> +       id="path45310"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9"
> +       inkscape:connection-end="#rect31-9-5-8" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 55.072222,98.944286 0.08673,18.993674"
> +       id="path45312"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-8"
> +       inkscape:connection-end="#rect31-9-5-8-0" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 176.00783,98.944294 -0.0768,18.805686"
> +       id="path45320"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-8-5"
> +       inkscape:connection-end="#rect31-9-5-8-0-6" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 161.17651,142.36643 -28.09763,23.51922"
> +       id="path45586"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-8-0-6"
> +       inkscape:connection-end="#rect31-9-5-8-0-4" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 71.42827,142.55441 30.73327,23.33124"
> +       id="path45588"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-8-0"
> +       inkscape:connection-end="#rect31-9-5-8-0-4" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 175.82205,49.694923 0.16945,29.520256"
> +       id="path45956"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-3"
> +       inkscape:connection-end="#rect31-9-5-8-5" />
> +  </g>
> +</svg>
> diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
> index 2dce507..0abea06 100644
> --- a/doc/guides/prog_guide/index.rst
> +++ b/doc/guides/prog_guide/index.rst
> @@ -29,6 +29,7 @@ Programmer's Guide
>      regexdev
>      rte_security
>      rawdev
> +    dmadev
>      link_bonding_poll_mode_drv_lib
>      timer_lib
>      hash_lib
> --
> 2.8.1
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v13 4/6] dmadev: introduce DMA device library implementation
  2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-08-05 12:56     ` Walsh, Conor
  2021-08-05 13:12       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Walsh, Conor @ 2021-08-05 12:56 UTC (permalink / raw)
  To: Chengwen Feng, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin

> This patch introduces the DMA device library implementation, which includes
> configuration of and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
>  config/rte_config.h          |   3 +
>  lib/dmadev/meson.build       |   1 +
>  lib/dmadev/rte_dmadev.c      | 563 +++++++++++++++++++++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h      | 118 ++++++++-
>  lib/dmadev/rte_dmadev_core.h |   2 +
>  lib/dmadev/version.map       |   1 +
>  6 files changed, 676 insertions(+), 12 deletions(-)
>  create mode 100644 lib/dmadev/rte_dmadev.c
> 
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 590903c..331a431 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -81,6 +81,9 @@
>  /* rawdev defines */
>  #define RTE_RAWDEV_MAX_DEVS 64
> 
> +/* dmadev defines */
> +#define RTE_DMADEV_MAX_DEVS 64
> +
>  /* ip_fragmentation defines */
>  #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
>  #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
> diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
> index 833baf7..d2fc85e 100644
> --- a/lib/dmadev/meson.build
> +++ b/lib/dmadev/meson.build
> @@ -1,6 +1,7 @@
>  # SPDX-License-Identifier: BSD-3-Clause
>  # Copyright(c) 2021 HiSilicon Limited.
> 
> +sources = files('rte_dmadev.c')
>  headers = files('rte_dmadev.h')
>  indirect_headers += files('rte_dmadev_core.h')
>  driver_sdk_headers += files('rte_dmadev_pmd.h')
> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> new file mode 100644
> index 0000000..b4f5498
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -0,0 +1,563 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#include <ctype.h>
> +#include <inttypes.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_eal.h>
> +#include <rte_errno.h>
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +#include <rte_memory.h>
> +#include <rte_memzone.h>
> +#include <rte_malloc.h>
> +#include <rte_string_fns.h>
> +
> +#include "rte_dmadev.h"
> +#include "rte_dmadev_pmd.h"
> +
> +struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
> +
> +static const char *mz_rte_dmadev_data = "rte_dmadev_data";
> +/* Shared memory between primary and secondary processes. */
> +static struct {
> +	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
> +} *dmadev_shared_data;
> +
> +RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
> +#define RTE_DMADEV_LOG(level, ...) \
> +	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
> +
> +/* Macros to check for valid device id */
> +#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
> +	if (!rte_dmadev_is_valid_dev(dev_id)) { \
> +		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
> +		return retval; \
> +	} \
> +} while (0)
> +
> +static int
> +dmadev_check_name(const char *name)
> +{
> +	size_t name_len;
> +
> +	if (name == NULL) {
> +		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
> +		return -EINVAL;
> +	}
> +
> +	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
> +	if (name_len == 0) {
> +		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
> +		return -EINVAL;
> +	}
> +	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
> +		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static uint16_t
> +dmadev_find_free_dev(void)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
> +			return i;
> +	}
> +
> +	return RTE_DMADEV_MAX_DEVS;
> +}
> +
> +static struct rte_dmadev*
> +dmadev_find(const char *name)
> +{
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
> +		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
> +			return &rte_dmadevices[i];
> +	}
> +
> +	return NULL;
> +}
> +
> +static int
> +dmadev_shared_data_prepare(void)
> +{
> +	const struct rte_memzone *mz;
> +
> +	if (dmadev_shared_data == NULL) {
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +			/* Allocate port data and ownership shared memory. */
> +			mz = rte_memzone_reserve(mz_rte_dmadev_data,
> +					 sizeof(*dmadev_shared_data),
> +					 rte_socket_id(), 0);
> +		} else
> +			mz = rte_memzone_lookup(mz_rte_dmadev_data);
> +		if (mz == NULL)
> +			return -ENOMEM;
> +
> +		dmadev_shared_data = mz->addr;
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +			memset(dmadev_shared_data->data, 0,
> +			       sizeof(dmadev_shared_data->data));
> +	}
> +
> +	return 0;
> +}
> +
> +static struct rte_dmadev *
> +dmadev_allocate(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +	uint16_t dev_id;
> +
> +	dev = dmadev_find(name);
> +	if (dev != NULL) {
> +		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
> +		return NULL;
> +	}
> +
> +	if (dmadev_shared_data_prepare() != 0) {
> +		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +		return NULL;
> +	}
> +
> +	dev_id = dmadev_find_free_dev();
> +	if (dev_id == RTE_DMADEV_MAX_DEVS) {
> +		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
> +		return NULL;
> +	}
> +
> +	dev = &rte_dmadevices[dev_id];
> +	dev->data = &dmadev_shared_data->data[dev_id];
> +	dev->data->dev_id = dev_id;
> +	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
> +
> +	return dev;
> +}
> +
> +static struct rte_dmadev *
> +dmadev_attach_secondary(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +	uint16_t i;
> +
> +	if (dmadev_shared_data_prepare() != 0) {
> +		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
> +		return NULL;
> +	}
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
> +			break;
> +	}
> +	if (i == RTE_DMADEV_MAX_DEVS) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %s is not driven by the primary process\n",
> +			name);
> +		return NULL;
> +	}
> +
> +	dev = &rte_dmadevices[i];
> +	dev->data = &dmadev_shared_data->data[i];
> +	dev->dev_private = dev->data->dev_private;
> +
> +	return dev;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_pmd_allocate(const char *name)
> +{
> +	struct rte_dmadev *dev;
> +
> +	if (dmadev_check_name(name) != 0)
> +		return NULL;
> +
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +		dev = dmadev_allocate(name);
> +	else
> +		dev = dmadev_attach_secondary(name);
> +
> +	if (dev == NULL)
> +		return NULL;
> +	dev->state = RTE_DMADEV_ATTACHED;
> +
> +	return dev;
> +}
> +
> +int
> +rte_dmadev_pmd_release(struct rte_dmadev *dev)
> +{
> +	void *dev_private_tmp;
> +
> +	if (dev == NULL)
> +		return -EINVAL;
> +
> +	if (dev->state == RTE_DMADEV_UNUSED)
> +		return 0;
> +
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
> +
> +	dev_private_tmp = dev->dev_private;
> +	memset(dev, 0, sizeof(struct rte_dmadev));
> +	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
> +		dev->dev_private = dev_private_tmp;
> +	dev->state = RTE_DMADEV_UNUSED;
> +
> +	return 0;
> +}
> +
> +struct rte_dmadev *
> +rte_dmadev_get_device_by_name(const char *name)
> +{
> +	if (dmadev_check_name(name) != 0)
> +		return NULL;
> +	return dmadev_find(name);
> +}
> +
> +int
> +rte_dmadev_get_dev_id(const char *name)
> +{
> +	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
> +	if (dev != NULL)
> +		return dev->data->dev_id;
> +	return -EINVAL;
> +}
> +
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id)
> +{
> +	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
> +		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
> +}
> +
> +uint16_t
> +rte_dmadev_count(void)
> +{
> +	uint16_t count = 0;
> +	uint16_t i;
> +
> +	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
> +		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
> +			count++;
> +	}
> +
> +	return count;
> +}
> +
> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
> +{
> +	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (dev_info == NULL)
> +		return -EINVAL;
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
> +	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
> +	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
> +					    sizeof(struct rte_dmadev_info));
> +	if (ret != 0)
> +		return ret;
> +
> +	dev_info->device = dev->device;
> +	dev_info->nb_vchans = dev->data->dev_conf.max_vchans;
> +
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	struct rte_dmadev_info info;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (dev_conf == NULL)
> +		return -EINVAL;
> +
> +	if (dev->data->dev_started != 0) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u must be stopped to allow configuration\n",
> +			dev_id);
> +		return -EBUSY;
> +	}
> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (dev_conf->max_vchans == 0) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u configure zero vchans\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (dev_conf->max_vchans > info.max_vchans) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u configure too many vchans\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (dev_conf->enable_silent &&
> +	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
> +		RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
> +	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
> +	if (ret == 0)
> +		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
> +
> +	return ret;
> +}
> +
> +int
> +rte_dmadev_start(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	if (dev->data->dev_started != 0) {
> +		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
> +		return 0;
> +	}
> +
> +	if (dev->dev_ops->dev_start == NULL)
> +		goto mark_started;
> +
> +	ret = (*dev->dev_ops->dev_start)(dev);
> +	if (ret != 0)
> +		return ret;
> +
> +mark_started:
> +	dev->data->dev_started = 1;
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_stop(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	if (dev->data->dev_started == 0) {
> +		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
> +		return 0;
> +	}
> +
> +	if (dev->dev_ops->dev_stop == NULL)
> +		goto mark_stopped;
> +
> +	ret = (*dev->dev_ops->dev_stop)(dev);
> +	if (ret != 0)
> +		return ret;
> +
> +mark_stopped:
> +	dev->data->dev_started = 0;
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_close(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +
> +	/* Device must be stopped before it can be closed */
> +	if (dev->data->dev_started == 1) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u must be stopped before closing\n", dev_id);
> +		return -EBUSY;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
> +	return (*dev->dev_ops->dev_close)(dev);
> +}
> +
> +int
> +rte_dmadev_vchan_setup(uint16_t dev_id,
> +		       const struct rte_dmadev_vchan_conf *conf)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	struct rte_dmadev_info info;
> +	bool src_is_dev, dst_is_dev;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (conf == NULL)
> +		return -EINVAL;
> +
> +	if (dev->data->dev_started != 0) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u must be stopped to allow configuration\n",
> +			dev_id);
> +		return -EBUSY;
> +	}
> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
> +	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
> +	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
> +	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
> +		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
> +	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support mem2mem transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
> +	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support mem2dev transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
> +	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support dev2mem transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
> +	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u don't support dev2dev transfer\n", dev_id);
> +		return -EINVAL;
> +	}
> +	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u number of descriptors invalid\n", dev_id);
> +		return -EINVAL;
> +	}
> +	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
> +		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
> +	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
> +	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u source port type invalid\n", dev_id);
> +		return -EINVAL;
> +	}
> +	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
> +		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
> +	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
> +	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u destination port type invalid\n", dev_id);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
> +	return (*dev->dev_ops->vchan_setup)(dev, conf);
> +}
> +
> +int
> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
> +		     struct rte_dmadev_stats *stats)
> +{
> +	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (stats == NULL)
> +		return -EINVAL;
> +	if (vchan >= dev->data->dev_conf.max_vchans &&
> +	    vchan != RTE_DMADEV_ALL_VCHAN) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range\n", dev_id, vchan);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
> +	memset(stats, 0, sizeof(struct rte_dmadev_stats));
> +	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
> +					  sizeof(struct rte_dmadev_stats));
> +}
> +
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (vchan >= dev->data->dev_conf.max_vchans &&
> +	    vchan != RTE_DMADEV_ALL_VCHAN) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range\n", dev_id, vchan);
> +		return -EINVAL;
> +	}
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
> +	return (*dev->dev_ops->stats_reset)(dev, vchan);
> +}
> +
> +int
> +rte_dmadev_dump(uint16_t dev_id, FILE *f)
> +{
> +	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	struct rte_dmadev_info info;
> +	int ret;
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (f == NULL)
> +		return -EINVAL;
> +
> +	ret = rte_dmadev_info_get(dev_id, &info);
> +	if (ret != 0) {
> +		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
> +		return -EINVAL;
> +	}
> +
> +	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
> +		dev->data->dev_id,
> +		dev->data->dev_name,
> +		dev->data->dev_started ? "started" : "stopped");
> +	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
> +	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
> +	fprintf(f, "  max_vchans_configured: %u\n", info.nb_vchans);
> +	fprintf(f, "  silent_mode: %s\n",
> +		dev->data->dev_conf.enable_silent ? "on" : "off");
> +
> +	if (dev->dev_ops->dev_dump != NULL)
> +		return (*dev->dev_ops->dev_dump)(dev, f);
> +
> +	return 0;
> +}
> +
> +int
> +rte_dmadev_selftest(uint16_t dev_id)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
> +	return (*dev->dev_ops->dev_selftest)(dev_id);
> +}
> diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
> index 439ad95..a6fbce6 100644
> --- a/lib/dmadev/rte_dmadev.h
> +++ b/lib/dmadev/rte_dmadev.h
> @@ -799,9 +799,21 @@ struct rte_dmadev_sge {
>   *   - <0: Error code returned by the driver copy function.
>   */
>  __rte_experimental
> -int
> +static inline int
>  rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
> -		uint32_t length, uint64_t flags);
> +		uint32_t length, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> +	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
> +		return -EINVAL;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
> +#endif
> +
> +	return (*dev->copy)(dev, vchan, src, dst, length, flags);
> +}
> 
>  /**
>   * @warning
> @@ -836,10 +848,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
>   *   - <0: Error code returned by the driver copy scatter-gather list function.
>   */
>  __rte_experimental
> -int
> +static inline int
>  rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
>  		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
> -		   uint64_t flags);
> +		   uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> +	    vchan >= dev->data->dev_conf.max_vchans ||
> +	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
> +		return -EINVAL;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
> +#endif
> +
> +	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
> +}
> 
>  /**
>   * @warning
> @@ -870,9 +895,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
>   *   - <0: Error code returned by the driver fill function.
>   */
>  __rte_experimental
> -int
> +static inline int
>  rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
> -		rte_iova_t dst, uint32_t length, uint64_t flags);
> +		rte_iova_t dst, uint32_t length, uint64_t flags)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> +	    vchan >= dev->data->dev_conf.max_vchans || length == 0)
> +		return -EINVAL;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
> +#endif
> +
> +	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
> +}
> 
>  /**
>   * @warning
> @@ -893,8 +930,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
>   *   - <0: Failure to trigger hardware.
>   */
>  __rte_experimental
> -int
> -rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
> +static inline int
> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> +	    vchan >= dev->data->dev_conf.max_vchans)
> +		return -EINVAL;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
> +#endif
> +
> +	return (*dev->submit)(dev, vchan);
> +}
> 
>  /**
>   * @warning
> @@ -920,9 +969,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
>   *   must be less than or equal to the value of nb_cpls.
>   */
>  __rte_experimental
> -uint16_t
> +static inline uint16_t
>  rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> -		     uint16_t *last_idx, bool *has_error);
> +		     uint16_t *last_idx, bool *has_error)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	uint16_t idx;
> +	bool err;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> +	    vchan >= dev->data->dev_conf.max_vchans || nb_cpls == 0)
> +		return 0;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
> +#endif
> +
> +	/* Ensure the pointer values are non-null to simplify drivers.
> +	 * In most cases these should be compile time evaluated, since this is
> +	 * an inline function.
> +	 * - If NULL is explicitly passed as parameter, then compiler knows the
> +	 *   value is NULL
> +	 * - If address of local variable is passed as parameter, then compiler
> +	 *   can know it's non-NULL.
> +	 */
> +	if (last_idx == NULL)
> +		last_idx = &idx;
> +	if (has_error == NULL)
> +		has_error = &err;
> +
> +	*has_error = false;
> +	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
> +}
> 
>  /**
>   * @warning
> @@ -952,10 +1029,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
>   *   status array are also set.
>   */

Hi Chengwen,

When completed_status is called with status set to NULL, the drivers will segfault.
Users may have a valid use case where they pass NULL as status, so it needs to be
checked and handled appropriately.
Could you handle this within dmadev, similar to what I've added below?
If added, the doxygen comment will also need to be updated to specify NULL as a valid input.

Thanks,
Conor.

>  __rte_experimental
> -uint16_t
> +static inline uint16_t
>  rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
>  			    const uint16_t nb_cpls, uint16_t *last_idx,
> -			    enum rte_dma_status_code *status);
> +			    enum rte_dma_status_code *status)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	uint16_t idx;
               enum rte_dma_status_code status_tmp;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
> +	    vchan >= dev->data->dev_conf.max_vchans ||
> +	    nb_cpls == 0 || status == NULL)
> +		return 0;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
> +#endif
> +
> +	if (last_idx == NULL)
> +		last_idx = &idx;
               if (status == NULL)
                             status = &status_tmp;
> +
> +	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
> +}
> 
>  #ifdef __cplusplus
>  }
> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
> index 599ab15..9272725 100644
> --- a/lib/dmadev/rte_dmadev_core.h
> +++ b/lib/dmadev/rte_dmadev_core.h
> @@ -177,4 +177,6 @@ struct rte_dmadev {
>  	uint64_t reserved[2]; /**< Reserved for future fields. */
>  } __rte_cache_aligned;
> 
> +extern struct rte_dmadev rte_dmadevices[];
> +
>  #endif /* _RTE_DMADEV_CORE_H_ */
> diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
> index 408b93c..86c5e75 100644
> --- a/lib/dmadev/version.map
> +++ b/lib/dmadev/version.map
> @@ -27,6 +27,7 @@ EXPERIMENTAL {
>  INTERNAL {
>          global:
> 
> +	rte_dmadevices;
>  	rte_dmadev_get_device_by_name;
>  	rte_dmadev_pmd_allocate;
>  	rte_dmadev_pmd_release;
> --
> 2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread
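
Taken together, the inline wrappers reviewed above form the dmadev fast path.
A hedged usage sketch of the enqueue/submit/poll cycle (v13 names; dev_id and
vchan are assumed configured and started, src/dst assumed to be valid IOVAs
for the vchan's direction):

#include <stdbool.h>
#include <stdint.h>
#include <rte_dmadev.h>

static int
copy_one(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
	 uint32_t len)
{
	uint16_t last_idx, done;
	bool has_error = false;
	int ret;

	ret = rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
	if (ret < 0)
		return ret;	/* enqueue failed */

	rte_dmadev_submit(dev_id, vchan);	/* kick the hardware */

	do {	/* busy-poll; a real application would bound or batch this */
		done = rte_dmadev_completed(dev_id, vchan, 1, &last_idx,
					    &has_error);
	} while (done == 0 && !has_error);

	return has_error ? -1 : 0;
}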

* Re: [dpdk-dev] [PATCH v13 4/6] dmadev: introduce DMA device library implementation
  2021-08-05 12:56     ` Walsh, Conor
@ 2021-08-05 13:12       ` fengchengwen
  2021-08-05 13:44         ` Conor Walsh
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-08-05 13:12 UTC (permalink / raw)
  To: Walsh, Conor, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin

On 2021/8/5 20:56, Walsh, Conor wrote:
>> This patch introduces the DMA device library implementation, which includes
>> configuration of and I/O with the DMA devices.

[snip]

>>
>>  /**
>>   * @warning
>> @@ -952,10 +1029,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
>>   *   status array are also set.
>>   */
> 
> Hi Chengwen,
> 
> When completed_status is called with status set to NULL, the drivers will segfault.
> Users may have a valid use case where they pass NULL as status, so it needs to be
> checked and handled appropriately.
> Could you handle this within dmadev, similar to what I've added below?
> If added, the doxygen comment will also need to be updated to specify NULL as a valid input.

Hi Conor,

The status argument must point to an array, so the status_tmp below will not work.

This API is the slow path (vs the completed API) and is designed to obtain detailed
status information, so the application should pass a valid status array.
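
In other words, the intended call pattern looks like this hedged sketch
(CPL_BURST, dev_id and vchan are illustrative):

#define CPL_BURST 32

enum rte_dma_status_code status[CPL_BURST];	/* room for nb_cpls entries */
uint16_t last_idx;
uint16_t n;

n = rte_dmadev_completed_status(dev_id, vchan, CPL_BURST, &last_idx, status);
/* status[0] .. status[n - 1] now hold one code per completed operation,
 * e.g. RTE_DMA_STATUS_ERROR_UNKNOWN or RTE_DMA_STATUS_INVALID_ADDR. */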

> 
> Thanks,
> Conor.
> 
>>  __rte_experimental
>> -uint16_t
>> +static inline uint16_t
>>  rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
>>  			    const uint16_t nb_cpls, uint16_t *last_idx,
>> -			    enum rte_dma_status_code *status);
>> +			    enum rte_dma_status_code *status)
>> +{
>> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +	uint16_t idx;
>                enum rte_dma_status_code status_tmp;
>> +
>> +#ifdef RTE_DMADEV_DEBUG
>> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
>> +	    vchan >= dev->data->dev_conf.max_vchans ||
>> +	    nb_cpls == 0 || status == NULL)
>> +		return 0;
>> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
>> +#endif
>> +
>> +	if (last_idx == NULL)
>> +		last_idx = &idx;
>                if (status == NULL)
>                              status = &status_tmp;
>> +
>> +	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
>> +}
>>
>>  #ifdef __cplusplus
>>  }
>> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
>> index 599ab15..9272725 100644
>> --- a/lib/dmadev/rte_dmadev_core.h
>> +++ b/lib/dmadev/rte_dmadev_core.h
>> @@ -177,4 +177,6 @@ struct rte_dmadev {
>>  	uint64_t reserved[2]; /**< Reserved for future fields. */
>>  } __rte_cache_aligned;
>>
>> +extern struct rte_dmadev rte_dmadevices[];
>> +
>>  #endif /* _RTE_DMADEV_CORE_H_ */
>> diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
>> index 408b93c..86c5e75 100644
>> --- a/lib/dmadev/version.map
>> +++ b/lib/dmadev/version.map
>> @@ -27,6 +27,7 @@ EXPERIMENTAL {
>>  INTERNAL {
>>          global:
>>
>> +	rte_dmadevices;
>>  	rte_dmadev_get_device_by_name;
>>  	rte_dmadev_pmd_allocate;
>>  	rte_dmadev_pmd_release;
>> --
>> 2.8.1
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v13 5/6] doc: add DMA device library guide
  2021-08-03 14:55     ` Jerin Jacob
@ 2021-08-05 13:15       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-08-05 13:15 UTC (permalink / raw)
  To: Jerin Jacob
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin

On 2021/8/3 22:55, Jerin Jacob wrote:
> On Tue, Aug 3, 2021 at 5:03 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>>
>> This patch adds dmadev library guide.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> ---
>>  doc/guides/prog_guide/dmadev.rst        | 126 +++++++++++++++
> 
> 
> doc build has following warning in my machine
> 
> ninja: Entering directory `build'
> [2789/2813] Generating html_guides with a custom command
> /export/dpdk.org/doc/guides/prog_guide/dmadev.rst:24: WARNING: Figure
> caption must be a paragraph or empty comment.
Will fix in v14.

> 
> .. figure:: img/dmadev_i1.*
> 
>    The model of the DMA framework built on
> 
>  * The DMA controller could have multiple hardware DMA channels (aka. hardware
>    DMA queues), each hardware DMA channel should be represented by a dmadev.
>  * The dmadev could create multiple virtual DMA channels, each virtual DMA
>    channel represents a different transfer context. The DMA operation request
>    must be submitted to the virtual DMA channel. e.g. Application could create
>    virtual DMA channel 0 for memory-to-memory transfer scenario, and create
>    virtual DMA channel 1 for memory-to-device transfer scenario.
> [2813/2813] Linking target app/dpdk-test-pipeline
> 
>> new file mode 100644
>> index 0000000..b305beb
>> --- /dev/null
>> +++ b/doc/guides/prog_guide/img/dmadev_i1.svg
> 
> why _i1 in the name?

OK, maybe dmadev.svg is enough.

> 
> 
>> @@ -0,0 +1,278 @@
>> +<?xml version="1.0" encoding="UTF-8" standalone="no"?>
>> +<!-- Created with Inkscape (http://www.inkscape.org/) -->
> 
> You could add an SPDX license and your company copyright as well.
> See other .svg files.

OK

> 
> 
> Rest looks good to me.
> 
> 
>> +

[snip]

>>
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v13 4/6] dmadev: introduce DMA device library implementation
  2021-08-05 13:12       ` fengchengwen
@ 2021-08-05 13:44         ` Conor Walsh
  0 siblings, 0 replies; 339+ messages in thread
From: Conor Walsh @ 2021-08-05 13:44 UTC (permalink / raw)
  To: fengchengwen, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin


On 05/08/2021 14:12, fengchengwen wrote:
> On 2021/8/5 20:56, Walsh, Conor wrote:
>>> This patch introduces the DMA device library implementation, which includes
>>> configuration of and I/O with the DMA devices.
> [snip]
>
>>>   /**
>>>    * @warning
>>> @@ -952,10 +1029,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
>>>    *   status array are also set.
>>>    */
>> Hi Chengwen,
>>
>> When completed_status is called with status set to NULL, the drivers will segfault.
>> Users may have a valid use case where they pass NULL as status, so it needs to be
>> checked and handled appropriately.
>> Could you handle this within dmadev, similar to what I've added below?
>> If added, the doxygen comment will also need to be updated to specify NULL as a valid input.
> Hi Conor,
>
> The status argument must point to an array, so the status_tmp below will not work.
>
> This API is the slow path (vs the completed API) and is designed to obtain detailed
> status information, so the application should pass a valid status array.

Thanks for your quick reply.

It is true that it is designed to be slower and return detailed status
info, but should we not handle it more gracefully than segfaulting?

I don't have too strong an opinion either way, so it's OK to ignore.

/Conor.


>
>> Thanks,
>> Conor.
>>
>>>   __rte_experimental
>>> -uint16_t
>>> +static inline uint16_t
>>>   rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
>>>   			    const uint16_t nb_cpls, uint16_t *last_idx,
>>> -			    enum rte_dma_status_code *status);
>>> +			    enum rte_dma_status_code *status)
>>> +{
>>> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>>> +	uint16_t idx;
>>                 enum rte_dma_status_code status_tmp;
>>> +
>>> +#ifdef RTE_DMADEV_DEBUG
>>> +	if (!rte_dmadev_is_valid_dev(dev_id) ||
>>> +	    vchan >= dev->data->dev_conf.max_vchans ||
>>> +	    nb_cpls == 0 || status == NULL)
>>> +		return 0;
>>> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
>>> +#endif
>>> +
>>> +	if (last_idx == NULL)
>>> +		last_idx = &idx;
>>                 if (status == NULL)
>>                               status = &status_tmp;
>>> +
>>> +	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
>>> +}
>>>
>>>   #ifdef __cplusplus
>>>   }
>>> diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
>>> index 599ab15..9272725 100644
>>> --- a/lib/dmadev/rte_dmadev_core.h
>>> +++ b/lib/dmadev/rte_dmadev_core.h
>>> @@ -177,4 +177,6 @@ struct rte_dmadev {
>>>   	uint64_t reserved[2]; /**< Reserved for future fields. */
>>>   } __rte_cache_aligned;
>>>
>>> +extern struct rte_dmadev rte_dmadevices[];
>>> +
>>>   #endif /* _RTE_DMADEV_CORE_H_ */
>>> diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
>>> index 408b93c..86c5e75 100644
>>> --- a/lib/dmadev/version.map
>>> +++ b/lib/dmadev/version.map
>>> @@ -27,6 +27,7 @@ EXPERIMENTAL {
>>>   INTERNAL {
>>>           global:
>>>
>>> +	rte_dmadevices;
>>>   	rte_dmadev_get_device_by_name;
>>>   	rte_dmadev_pmd_allocate;
>>>   	rte_dmadev_pmd_release;
>>> --
>>> 2.8.1

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v14 0/6] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (16 preceding siblings ...)
  2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
@ 2021-08-10 11:54 ` Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (5 more replies)
  2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
                   ` (11 subsequent siblings)
  29 siblings, 6 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-10 11:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch set contains six patches adding the new dmadev library.

Chengwen Feng (6):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  maintainers: add for dmadev

---
v14:
* add vchan parameter to rte_dmadev_vchan_setup.
* rename max_vchans to nb_vchans in struct rte_dmadev_conf.
* fix dmadev programming guide doxygen warning.
v13:
* add dmadev_i1.svg.
* delete one unnecessary comment line of rte_dmadev_info_get.
v12:
* add max_sges field for struct rte_dmadev_info.
* add more description to dmadev.rst.
* replace scatter with scatter-gather in code comments.
* split to six patch.
* fix typo.
v11:
* rename RTE_DMA_STATUS_UNKNOWN to RTE_DMA_STATUS_ERROR_UNKNOWN.
* add RTE_DMA_STATUS_INVALID_ADDR macro.
* update release-note.
* add acked-by for the 1/2 patch.
* add dmadev programming guide as the 2/2 patch.
v10:
* fix rte_dmadev_completed_status comment.

 MAINTAINERS                          |    5 +
 config/rte_config.h                  |    3 +
 doc/api/doxy-api-index.md            |    1 +
 doc/api/doxy-api.conf.in             |    1 +
 doc/guides/prog_guide/dmadev.rst     |  126 ++++
 doc/guides/prog_guide/img/dmadev.svg |  283 +++++++++
 doc/guides/prog_guide/index.rst      |    1 +
 lib/dmadev/meson.build               |    7 +
 lib/dmadev/rte_dmadev.c              |  567 ++++++++++++++++++
 lib/dmadev/rte_dmadev.h              | 1058 ++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h         |  182 ++++++
 lib/dmadev/rte_dmadev_pmd.h          |   72 +++
 lib/dmadev/version.map               |   36 ++
 lib/meson.build                      |    1 +
 14 files changed, 2343 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v14 1/6] dmadev: introduce DMA device library public APIs
  2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
@ 2021-08-10 11:54   ` Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-10 11:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs, which expose generic
operations that enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
---
 doc/api/doxy-api-index.md |   1 +
 doc/api/doxy-api.conf.in  |   1 +
 lib/dmadev/meson.build    |   4 +
 lib/dmadev/rte_dmadev.h   | 962 ++++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map    |  25 ++
 lib/meson.build           |   1 +
 6 files changed, 994 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..a44a92b 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..6d5bd85
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..3c72aa8
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,962 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+ * channel represents a different transfer context. The DMA operation request
+ * must be submitted to a virtual DMA channel. E.g. an application could create
+ * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+ * virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and
+ * could be released by rte_dmadev_pmd_release() during the PCI/SoC device
+ * removing phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit the operation request to the virtual
+ * DMA channel: if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively,
+ * passing the RTE_DMA_OP_FLAG_SUBMIT flag to any of the first three APIs
+ * does the same work (@see RTE_DMA_OP_FLAG_SUBMIT).
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not invoke the above two completed APIs.
+ *
+ * About the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero, after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
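+ * A minimal application-side sketch (variable names are illustrative) of
+ * counting completions with the wrap-around semantics above; the unsigned
+ * 16-bit subtraction handles the UINT16_MAX -> 0 wrap:
+ *
+ * \code{.unparsed}
+ * static uint16_t prev_idx = UINT16_MAX; // index before the first job (0)
+ * uint16_t last_idx;
+ *
+ * if (rte_dmadev_completed(dev_id, vchan, 64, &last_idx, NULL) > 0) {
+ *     uint16_t done = (uint16_t)(last_idx - prev_idx);
+ *     prev_idx = last_idx;
+ *     // 'done' jobs finished since the previous poll
+ * }
+ * \endcode
+ *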
+ * The DMA operation addresses used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) are defined as type rte_iova_t. The
+ * dmadev supports two types of address: memory addresses and device addresses.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores working on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
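+
+/* A minimal usage sketch of the setup order and dataplane flow described
+ * above ('dev_id' and the IOVA variables are illustrative; error handling
+ * omitted):
+ *
+ * \code{.unparsed}
+ * struct rte_dmadev_conf conf = { .nb_vchans = 1 };
+ * struct rte_dmadev_vchan_conf vconf = {
+ *     .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *     .nb_desc = 128,
+ * };
+ *
+ * rte_dmadev_configure(dev_id, &conf);
+ * rte_dmadev_vchan_setup(dev_id, 0, &vconf);
+ * rte_dmadev_start(dev_id);
+ *
+ * rte_dmadev_copy(dev_id, 0, src_iova, dst_iova, length,
+ *                 RTE_DMA_OP_FLAG_SUBMIT);
+ * while (rte_dmadev_completed(dev_id, 0, 1, NULL, NULL) == 0)
+ *     ; // poll until the copy completes
+ * \endcode
+ */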
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using a VA as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc(), or the stack.
+ * If the device doesn't support SVA, the application must pass an IOVA
+ * address, e.g. one obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, leaving a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
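+
+/* Capability bits are reported in struct rte_dmadev_info::dev_capa; a
+ * minimal sketch of gating an optional op on them:
+ *
+ * \code{.unparsed}
+ * struct rte_dmadev_info info;
+ *
+ * rte_dmadev_info_get(dev_id, &info);
+ * if (info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY_SG)
+ *     ; // rte_dmadev_copy_sg() may be used
+ * else
+ *     ; // fall back to multiple rte_dmadev_copy() calls
+ * \endcode
+ */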
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   - =0: Success, driver updates the information of the DMA device.
+ *   - <0: Error code returned by the driver info get function.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t nb_vchans;
+	/**< The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false-default mode, true-silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   - =0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device started.
+ *   - <0: Error code returned by the driver start function.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Success, device stopped.
+ *   - <0: Error code returned by the driver stop function.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - =0: Successfully close device
+ *   - <0: Failure to close device
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from memory
+	 * (which is SoCs memory) to device (which is host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is host memory) to memory (which is SoCs memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is host memory) to the device (which is another host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The pasid field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameters in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameters in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
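+
+/* A sketch of configuring a vchan for memory-to-device transfers over a
+ * PCIe port (field values are illustrative only):
+ *
+ * \code{.unparsed}
+ * struct rte_dmadev_vchan_conf vconf = {
+ *     .direction = RTE_DMA_DIR_MEM_TO_DEV,
+ *     .nb_desc = 128,
+ *     .dst_port = {
+ *         .port_type = RTE_DMADEV_PORT_PCIE,
+ *         .pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *     },
+ * };
+ * \endcode
+ */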
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dmadev_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative errno is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which completed successfully. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   - =0: Successfully retrieve stats.
+ *   - <0: Failure to retrieve stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   - =0: Successfully reset stats.
+ *   - <0: Failure to reset stats.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
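+
+/* A sketch of reading and clearing aggregate statistics across all
+ * vchans (return values ignored for brevity):
+ *
+ * \code{.unparsed}
+ * struct rte_dmadev_stats stats;
+ *
+ * rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+ * printf("submitted=%" PRIu64 " completed=%" PRIu64 " failed=%" PRIu64 "\n",
+ *        stats.submitted_count, stats.completed_count,
+ *        stats.completed_fail_count);
+ * rte_dmadev_stats_reset(dev_id, RTE_DMADEV_ALL_VCHAN);
+ * \endcode
+ */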
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Non-zero otherwise.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: Selftest successful.
+ *   - -ENOTSUP if the device doesn't support selftest
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * this job), allowing outstanding requests to complete as much as
+	 * possible and so reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed, though, so report the status of the not-attempted jobs
+	 * before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; this covers the case where the address is
+	 * known to be in error, but not which one.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read
+	 * error.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device/device-to-memory/
+	 * device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in order (i.e. it has a default fence between
+ * operations), this flag could be a NOP.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must issue a doorbell to the hardware
+ * after the jobs are enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write DMA data to the low-level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so a driver should not return an error if this flag
+ * is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy job.
+ *   - <0: Error code returned by the driver copy function.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is triggered to begin this operation; otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued copy scatter-gather list job.
+ *   - <0: Error code returned by the driver copy scatter-gather list function.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
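+
+/* A sketch of a 2-into-1 gather copy ('a0', 'a1', 'd0' are illustrative
+ * IOVA addresses; nb_src/nb_dst must not exceed rte_dmadev_info::max_sges):
+ *
+ * \code{.unparsed}
+ * struct rte_dmadev_sge src[2] = {
+ *     { .addr = a0, .length = 64 },
+ *     { .addr = a1, .length = 64 },
+ * };
+ * struct rte_dmadev_sge dst[1] = { { .addr = d0, .length = 128 } };
+ *
+ * rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1, RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode
+ */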
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued fill job.
+ *   - <0: Error code returned by the driver fill function.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   - =0: Successfully trigger hardware.
+ *   - <0: Failure to trigger hardware.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
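+
+/* A sketch of batching: enqueue several jobs without the SUBMIT flag and
+ * ring the doorbell once ('i', 'n', 'src', 'dst', 'len' are illustrative):
+ *
+ * \code{.unparsed}
+ * for (i = 0; i < n; i++)
+ *     rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len, 0);
+ * rte_dmadev_submit(dev_id, vchan);
+ * \endcode
+ */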
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if a transfer error has occurred.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; each operation's
+ * result may be a success or a failure.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of the status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (say n), then the first n values in
+ *   the status array are also set.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
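+
+/* A sketch of draining completions with per-operation status (the status
+ * array must hold at least nb_cpls entries):
+ *
+ * \code{.unparsed}
+ * enum rte_dma_status_code st[8];
+ * uint16_t last_idx, i;
+ * uint16_t n = rte_dmadev_completed_status(dev_id, vchan, 8, &last_idx, st);
+ *
+ * for (i = 0; i < n; i++)
+ *     if (st[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *         ; // handle the failed operation
+ * \endcode
+ */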
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..02fffe3
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,25 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..a542c23 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v14 2/6] dmadev: introduce DMA device library internal header
  2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-08-10 11:54   ` Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-10 11:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library internal header, which
contains the internal data types used by the DMA devices to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 180 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 181 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85..f421ec1 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..ff7b70a
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return the number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from the primary process; it is used by the secondary process to
+	 * obtain the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data' pointed to by the 'data' field.
+	 *
+	 * - In the secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of 'struct
+	 * rte_dmadev_data', which was initialized by the primary process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
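+
+/* Fast-path calls are expected to go straight through the per-process
+ * function pointers above, e.g. (a sketch of how the public API layer
+ * dispatches; the rte_dmadevices array is added in a later patch):
+ *
+ *   struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+ *   return (*dev->copy)(dev, vchan, src, dst, length, flags);
+ */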
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v14 3/6] dmadev: introduce DMA device library PMD header
  2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-08-10 11:54   ` Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-10 11:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library PMD header, which provides
the driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 +++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1..833baf7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 3c72aa8..48803ae 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -743,6 +743,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
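+
+/* Typical probe-time usage by a PMD (a sketch; 'priv' and 'my_dmadev_ops'
+ * are hypothetical driver objects):
+ *
+ *   struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ *   if (dev == NULL)
+ *       return -ENOMEM;
+ *   dev->dev_private = priv;
+ *   dev->dev_ops = &my_dmadev_ops;
+ */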
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 02fffe3..408b93c 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -23,3 +23,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+        global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v14 4/6] dmadev: introduce DMA device library implementation
  2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-08-10 11:54   ` Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide Chengwen Feng
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 6/6] maintainers: add for dmadev Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-10 11:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library implementation, which
includes configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 567 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 680 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7..d2fc85e 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..80be485
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u doesn't support silent mode\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= info.nb_vchans) {
+		RTE_DMADEV_LOG(ERR, "Device %u vchan out of range\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u failed to get device info\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 48803ae..84a858a 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -799,9 +799,21 @@ struct rte_dmadev_sge {
  *   - <0: Error code returned by the driver copy function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -836,10 +848,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - <0: Error code returned by the driver copy scatter-gather list function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -870,9 +895,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - <0: Error code returned by the driver fill function.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -893,8 +930,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   - <0: Failure to trigger hardware.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -920,9 +969,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -952,10 +1029,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index ff7b70a..aa8e622 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -177,4 +177,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 408b93c..86c5e75 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -27,6 +27,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide
  2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-08-10 11:54   ` Chengwen Feng
  2021-08-10 15:27     ` Walsh, Conor
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 6/6] maintainers: add for dmadev Chengwen Feng
  5 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-08-10 11:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds the dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 doc/guides/prog_guide/dmadev.rst     | 126 ++++++++++++++++
 doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst      |   1 +
 3 files changed, 410 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..6e8cce0
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,126 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. It provides a generic DMA
+device framework which supports both physical (hardware) and virtual (software)
+DMA devices, as well as a generic DMA API which allows DMA devices to be
+managed and configured, and which supports DMA operations to be provisioned
+via DMA poll mode drivers.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The figure above shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (aka. hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+   channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. e.g. an application could create
+   virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+   virtual DMA channel 1 for the memory-to-device transfer scenario.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controller is discovered during the PCI probe/enumeration of the
+EAL function which is executed at DPDK initialization, based on their PCI
+device identifier, each unique PCI BDF (bus/bridge, device, function). Specific
+physical DMA controller, like other physical devices in DPDK can be listed using
+the EAL command line options.
+
+And then dmadevs are dynamically allocated by rte_dmadev_pmd_allocate() based on
+the number of hardware DMA channels.
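+
+As a hedged sketch of the PMD side (the device name and ops table below are
+hypothetical; a real driver also fills in its private data):
+
+.. code-block:: c
+
+   /* inside a hypothetical PMD probe callback, once per HW DMA channel */
+   struct rte_dmadev *dev = rte_dmadev_pmd_allocate("my_dma_channel0");
+   if (dev == NULL)
+       return -ENOMEM;
+   dev->dev_ops = &my_dmadev_ops; /* hypothetical rte_dmadev_ops table */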
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device: for example, the number of virtual DMA channels to set up,
+and an indication of whether to enable silent mode.
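+
+A minimal configuration sketch (illustrative only; error handling shortened,
+and ``dev_id`` assumed to be obtained via ``rte_dmadev_get_dev_id()``):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf conf = {
+       .nb_vchans = 1,          /* one virtual DMA channel will be set up */
+       .enable_silent = false,  /* completions are polled by the app */
+   };
+
+   if (rte_dmadev_configure(dev_id, &conf) != 0)
+       rte_exit(EXIT_FAILURE, "cannot configure dmadev %u\n", dev_id);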
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel: for example, the transfer direction,
+the number of descriptors for the virtual DMA channel, the source device
+access port parameters, and the destination device access port parameters.
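+
+For example (a sketch under the configuration above; the descriptor count is
+an arbitrary value that must lie within the ``min_desc``/``max_desc`` range
+reported by ``rte_dmadev_info_get``):
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vconf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 128,
+   };
+
+   if (rte_dmadev_vchan_setup(dev_id, 0, &vconf) != 0)
+       rte_exit(EXIT_FAILURE, "cannot set up vchan 0 on dmadev %u\n", dev_id);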
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature set. In order to get the supported PMD
+features ``rte_dmadev_info_get`` API which returns the info of the device and
+it's supported features.
+
+A special device capability is silent mode which application don't required to
+invoke dequeue APIs.
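+
+For instance (an illustrative check, not a complete program), an application
+could verify that copy operations are supported before using them:
+
+.. code-block:: c
+
+   struct rte_dmadev_info info;
+
+   if (rte_dmadev_info_get(dev_id, &info) != 0)
+       rte_exit(EXIT_FAILURE, "cannot get info of dmadev %u\n", dev_id);
+
+   if (!(info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY))
+       rte_exit(EXIT_FAILURE, "dmadev %u lacks copy support\n", dev_id);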
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+The enqueue APIs include like ``rte_dmadev_copy`` and ``rte_dmadev_fill``, if
+enqueue successful, an uint16_t ring_idx is returned. This ring_idx can be used
+by applications to track per-operation metadata in an application defined
+circular ring.
+
+The ``rte_dmadev_submit`` API was used to issue doorbell to hardware, and also
+there are flags (``RTE_DMA_OP_FLAG_SUBMIT``) parameter of the enqueue APIs
+could do the same work.
+
+There are two dequeue APIs (``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``) could used to obtain the result of request.
+The first API returns the number of operation requests completed successfully,
+the second API returns the number of operation requests completed which may
+successfully or failed and also with meaningful status code. Also these two
+APIs could return the last completed operation's ring_idx which will help to
+track application-defined circular ring.
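+
+Putting these together, a minimal polled copy might look as follows (a sketch
+only: ``src_iova``, ``dst_iova``, ``length`` and ``vchan`` are assumed to be
+prepared by the application):
+
+.. code-block:: c
+
+   uint16_t last_idx;
+   bool has_error;
+
+   /* enqueue one copy and ring the doorbell in the same call */
+   int ring_idx = rte_dmadev_copy(dev_id, vchan, src_iova, dst_iova,
+                                  length, RTE_DMA_OP_FLAG_SUBMIT);
+   if (ring_idx < 0)
+       rte_exit(EXIT_FAILURE, "cannot enqueue copy\n");
+
+   /* poll until the one outstanding operation completes */
+   while (rte_dmadev_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
+       ;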
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000..157d7eb
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v14 6/6] maintainers: add for dmadev
  2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide Chengwen Feng
@ 2021-08-10 11:54   ` Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-10 11:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds Chengwen Feng as the dmadev maintainer.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 266f5ac..fd9feb1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,11 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 
 Memory Pool Drivers
 -------------------
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide
  2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide Chengwen Feng
@ 2021-08-10 15:27     ` Walsh, Conor
  2021-08-11  0:47       ` fengchengwen
  2021-08-13  9:20       ` fengchengwen
  0 siblings, 2 replies; 339+ messages in thread
From: Walsh, Conor @ 2021-08-10 15:27 UTC (permalink / raw)
  To: Chengwen Feng, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin

[snip]

Hi Chengwen,
I have included some feedback to improve the grammar and readability
of the docs inline.

> +Device Management
> +-----------------
> +
> +Device Creation
> +~~~~~~~~~~~~~~~
> +
> +Physical DMA controller is discovered during the PCI probe/enumeration of

^ "controllers are" instead of "controller is"

> the
> +EAL function which is executed at DPDK initialization, based on their PCI
> +device identifier, each unique PCI BDF (bus/bridge, device, function).

Change after the first , to the following "this is based on their PCI BDF
(bus/bridge, device, function)."

> Specific
> +physical DMA controller, like other physical devices in DPDK can be listed
> using

^ "controllers" instead of "controller"

> +the EAL command line options.
> +
> +And then dmadevs are dynamically allocated by

^ Change "And then" to "After DPDK initialization".

> rte_dmadev_pmd_allocate() based on
> +the number of hardware DMA channels.

[snip]

> +Device Features and Capabilities
> +--------------------------------
> +
> +DMA devices may support different feature set. In order to get the
> supported PMD

^ missing "s" at the end of line: "DMA devices may support different feature sets."

> +features ``rte_dmadev_info_get`` API which returns the info of the device
> and
> +it's supported features.

Replace "In order to get the supported PMD features rte_dmadev_info_get API which
returns the info of the device and it's supported features." with:
The ``rte_dmadev_info_get`` API can be used to get a devices info and supported features.

> +
> +A special device capability is silent mode which application don't required to
> +invoke dequeue APIs.

Replace the above sentence with:
"Silent mode is a special device capability which does not require the application
to invoke dequeue APIs."

> +
> +
> +Enqueue / Dequeue APIs
> +~~~~~~~~~~~~~~~~~~~~~~
> +
> +The enqueue APIs include like ``rte_dmadev_copy`` and ``rte_dmadev_fill``,
> if
> +enqueue successful, an uint16_t ring_idx is returned. This ring_idx can be
> used
> +by applications to track per-operation metadata in an application defined
> +circular ring.

Replace the enqueue paragraph with the following:
"Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used
to enqueue operations to hardware. If an enqueue is successful, a ``ring_idx``
is returned. This ``ring_idx`` can be used by applications to track per-operation metadata
in an application-defined circular ring."

> +
> +The ``rte_dmadev_submit`` API was used to issue doorbell to hardware,
> and also
> +there are flags (``RTE_DMA_OP_FLAG_SUBMIT``) parameter of the
> enqueue APIs
> +could do the same work.

Replace submit line with this:
"The ``rte_dmadev_submit`` API is used to issue the doorbell to hardware.
Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the
enqueue APIs to also issue the doorbell to hardware."

> +
> +There are two dequeue APIs (``rte_dmadev_completed`` and
> +``rte_dmadev_completed_status``) could used to obtain the result of
> request.

Replace the above sentence with:
"There are two dequeue APIs ``rte_dmadev_completed`` and
``rte_dmadev_completed_status``, these are used to obtain the
results of the enqueue requests."

> +The first API returns the number of operation requests completed
> successfully,
> +the second API returns the number of operation requests completed which
> may
> +successfully or failed and also with meaningful status code.

Replace above line with the following:
"``rte_dmadev_completed`` will return the number of successfully completed operations.
``rte_dmadev_completed_status`` will return the total number of completed operations
along with the status of each operation (filled into the ``status`` array passed by user)."

> Also these two
> +APIs could return the last completed operation's ring_idx which will help to
> +track application-defined circular ring.

Replace the last line with this:
"These two APIs can also return the last completed operations ``ring_idx`` which
could help developers track operations within their own application-defined rings."

With the improvements suggested above,
Acked-by: Conor Walsh <conor.walsh@intel.com>

Thanks,
Conor.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide
  2021-08-10 15:27     ` Walsh, Conor
@ 2021-08-11  0:47       ` fengchengwen
  2021-08-13  9:20       ` fengchengwen
  1 sibling, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-08-11  0:47 UTC (permalink / raw)
  To: Walsh, Conor, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin

Many thanks, will fix in v15

On 2021/8/10 23:27, Walsh, Conor wrote:
> [snip]
> 
> Hi Chengwen,
> I have included some feedback to improve the grammar and readability
> of the docs inline.
> 

[snip]



^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v15 0/6] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (17 preceding siblings ...)
  2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
@ 2021-08-13  9:09 ` Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (5 more replies)
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
                   ` (10 subsequent siblings)
  29 siblings, 6 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-13  9:09 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch set contains six patches adding the new dmadev library.

Chengwen Feng (6):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  maintainers: add for dmadev

---
v15:
* fix typos and improve readability of the prog_guide.
* fix public API return value comments inconsistent with the implementation.
* note that enqueue APIs return -ENOSPC when they fail due to lack of space.
v14:
* add vchan parameter to rte_dmadev_vchan_setup.
* rename max_vchans to nb_vchans in struct rte_dmadev_conf.
* fix dmadev programming guide doxygen warning.
v13:
* add dmadev_i1.svg.
* delete one unnecessary comment line of rte_dmadev_info_get.
v12:
* add max_sges field to struct rte_dmadev_info.
* add more description to dmadev.rst.
* replace scatter with scatter gather in code comment.
* split to six patch.
* fix typo.
v11:
* rename RTE_DMA_STATUS_UNKNOWN to RTE_DMA_STATUS_ERROR_UNKNOWN.
* add RTE_DMA_STATUS_INVALID_ADDR marco.
* update release-note.
* add acked-by for 1/2 patch.
* add dmadev programming guide which is 2/2 patch.
v10:
* fix rte_dmadev_completed_status comment.

 MAINTAINERS                            |    5 +
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/prog_guide/dmadev.rst       |  125 ++++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    6 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  567 +++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1053 ++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  182 ++++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 +++
 lib/dmadev/version.map                 |   36 ++
 lib/meson.build                        |    1 +
 15 files changed, 2343 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v15 1/6] dmadev: introduce DMA device library public APIs
  2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
@ 2021-08-13  9:09   ` Chengwen Feng
  2021-08-19 14:52     ` Bruce Richardson
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (4 subsequent siblings)
  5 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-08-13  9:09 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs, which expose generic
operations that can enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
---
 doc/api/doxy-api-index.md |   1 +
 doc/api/doxy-api.conf.in  |   1 +
 lib/dmadev/meson.build    |   4 +
 lib/dmadev/rte_dmadev.h   | 957 ++++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map    |  25 ++
 lib/meson.build           |   1 +
 6 files changed, 989 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..a44a92b 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..6d5bd85
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..1358e7d
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,957 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+ * channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. e.g. an application could create
+ * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+ * virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * dmadevs are dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit operation requests to the virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue the doorbell to hardware; alternatively, the
+ * flags parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT) can be
+ * used to do the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT), the
+ * application is not required to invoke the above two completed APIs.
+ *
+ * The ring_idx values returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()) obey the following rules:
+ *     - The ring_idx for each virtual DMA channel is independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring (a sketch follows
+ *       the example below).
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the returned ring_idx is 0
+ *     - step-3: enqueue a copy operation again, the returned ring_idx is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the returned ring_idx is 65535
+ *     - step-x+1: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
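+ *
+ * An illustrative (not normative) sketch of per-operation metadata tracking
+ * with the ring_idx, sized so that the application ring's wrap-around matches
+ * the uint16_t ring_idx wrap-around:
+ *
+ * @code
+ *     void *meta_ring[UINT16_MAX + 1]; // application-defined metadata ring
+ *
+ *     int idx = rte_dmadev_copy(dev_id, vchan, src, dst, len,
+ *                               RTE_DMA_OP_FLAG_SUBMIT);
+ *     if (idx >= 0)
+ *         meta_ring[idx] = my_metadata; // my_metadata is hypothetical
+ * @endcode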
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
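As an illustrative sketch of the flow described above (hypothetical dev_id 0
and vchan 0, IOVA variables assumed to be already obtained, error handling
elided):

    uint16_t nb_done, last_idx;
    bool has_error;
    int ring_idx;

    /* Enqueue a copy and ring the doorbell in the same call. */
    ring_idx = rte_dmadev_copy(0, 0, src_iova, dst_iova, length,
                               RTE_DMA_OP_FLAG_SUBMIT);
    if (ring_idx < 0) {
        /* e.g. -ENOSPC: no descriptor space left, retry later */
    }

    /* Later, poll for up to 8 completions on the same vchan. */
    nb_done = rte_dmadev_completed(0, 0, 8, &last_idx, &has_error);
    /* All ring_idx values up to and including last_idx have completed. */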
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the given dev_id is a valid DMA device index.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   true if the device index is valid, false otherwise.
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows a VA to be used as a DMA address.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc(), or the stack.
+ * If the device does not support SVA, the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * The ops capabilities start at bit index 32, so as to leave a gap between
+ * the normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t nb_vchans;
+	/**< The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info, which is obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false-default mode, true-silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
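A minimal set-up sketch (assuming device 0 exists and supports at least one
virtual DMA channel):

    struct rte_dmadev_info info;
    struct rte_dmadev_conf conf = { 0 };

    if (rte_dmadev_info_get(0, &info) != 0)
        return -1;
    conf.nb_vchans = 1;          /* must not exceed info.max_vchans */
    conf.enable_silent = false;  /* default (non-silent) mode */
    if (rte_dmadev_configure(0, &conf) != 0)
        return -1;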
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to the device (host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from the
+	 * device (host memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (host memory) to another device (another host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows how the SoC's PCIe module
+		 * connects to multiple PCIe hosts and multiple endpoints.
+		 * The PCIe module has an integrated DMA controller.
+		 *
+		 * If the DMA controller wants to access the memory of host A,
+		 * the access can be initiated by PF1 in core0, or by VF0 of
+		 * PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
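As a sketch, the "access host A's memory via PF-1 of PCIe Core0" case from
the diagram above could be expressed as follows (all field values are
illustrative only):

    struct rte_dmadev_port_param port = {
        .port_type = RTE_DMADEV_PORT_PCIE,
        .pcie = {
            .coreid = 0, /* PCIe Core0 */
            .pfid = 1,   /* PF-1 */
            .vfen = 0,   /* PF access, VF not enabled */
        },
    };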
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Describes the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Describes the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Describes the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Describes the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dmadev_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf);
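A sketch of setting up vchan 0 of device 0 for memory-to-memory transfers and
starting the device (the nb_desc value is illustrative; it must lie within
the [min_desc, max_desc] range reported by rte_dmadev_info_get()):

    struct rte_dmadev_vchan_conf vconf = {
        .direction = RTE_DMA_DIR_MEM_TO_MEM,
        .nb_desc = 1024,
        /* src_port/dst_port stay RTE_DMADEV_PORT_NONE for mem2mem */
    };

    if (rte_dmadev_vchan_setup(0, 0, &vconf) != 0)
        return -1;
    if (rte_dmadev_start(0) != 0)
        return -1;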
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted_count;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed_fail_count;
+	/**< Count of operations which failed to complete. */
+	uint64_t completed_count;
+	/**< Count of operations which completed successfully. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMADEV_ALL_VCHAN, statistics of all channels are returned.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
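For example (a sketch assuming <stdio.h> and <inttypes.h> are included), the
aggregate statistics of device 0 can be printed with:

    struct rte_dmadev_stats stats;

    if (rte_dmadev_stats_get(0, RTE_DMADEV_ALL_VCHAN, &stats) == 0)
        printf("submitted=%" PRIu64 " completed=%" PRIu64
               " failed=%" PRIu64 "\n",
               stats.submitted_count, stats.completed_count,
               stats.completed_fail_count);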
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMADEV_ALL_VCHAN, statistics of all channels are reset.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: selftest successful.
+ *   - -ENOTSUP: if the device doesn't support selftest.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user can modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * the job), which allows outstanding requests to complete as far as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation was not attempted, in the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed anyway, so the status of the not-attempted jobs is
+	 * reported before that of those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source address. */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; this covers the case where an address error is
+	 * known to have occurred but it is unclear which address was at fault.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device, device-to-memory
+	 * or device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * An operation with this flag must be processed only after all previous
+ * operations have completed.
+ * If the given DMA HW works in-order (i.e. it has an implicit fence between
+ * operations), this flag may be a no-op.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * An operation with this flag issues a doorbell to the hardware after the
+ * job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write the DMA data into the low-level cache.
+ * Used for performance optimization; this is only a hint, there is no
+ * capability bit for it, and the driver must not return an error if this
+ * flag is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
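A batching sketch: enqueue several copies but ring the doorbell only once
(nb_jobs and the src[]/dst[]/len[] arrays are assumptions of this example):

    uint16_t i;

    for (i = 0; i < nb_jobs; i++)
        if (rte_dmadev_copy(0, 0, src[i], dst[i], len[i], 0) < 0)
            break;
    /* one doorbell for the whole batch */
    rte_dmadev_submit(0, 0);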
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is triggered to begin this operation; otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   Pointer to the array of source scatter-gather entries.
+ * @param dst
+ *   Pointer to the array of destination scatter-gather entries.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
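A scatter-gather sketch gathering two 64-byte source segments into one
128-byte destination (the IOVA variables are assumptions of this example,
and the entry counts must not exceed info.max_sges):

    struct rte_dmadev_sge src[2] = {
        { .addr = src_iova0, .length = 64 },
        { .addr = src_iova1, .length = 64 },
    };
    struct rte_dmadev_sge dst[1] = {
        { .addr = dst_iova, .length = 128 },
    };

    rte_dmadev_copy_sg(0, 0, src, dst, 2, 1, RTE_DMA_OP_FLAG_SUBMIT);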
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
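A fill sketch zeroing a 4 KiB buffer (dst_iova is an assumption of this
example; the 64-bit pattern covers the destination buffer):

    rte_dmadev_fill(0, 0, UINT64_C(0), dst_iova, 4096,
                    RTE_DMA_OP_FLAG_SUBMIT);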
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates whether any transfer errors occurred.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed, whether they
+ * succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number n is greater than zero, then the first n entries of the
+ *   status array are also set.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
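One possible completion-handling pattern combining the two APIs (a sketch;
handle_failed_op() is a hypothetical application callback):

    enum rte_dma_status_code status[8];
    uint16_t last_idx, nb_done, i;
    bool has_error;

    nb_done = rte_dmadev_completed(0, 0, 8, &last_idx, &has_error);
    if (has_error) {
        /* fall back to the slower status API for per-operation results */
        nb_done = rte_dmadev_completed_status(0, 0, 8, &last_idx, status);
        for (i = 0; i < nb_done; i++)
            if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
                handle_failed_op((uint16_t)(last_idx - nb_done + 1 + i));
    }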
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..02fffe3
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,25 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..a542c23 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v15 2/6] dmadev: introduce DMA device library internal header
  2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-08-13  9:09   ` Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-13  9:09 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library internal header, which contains
internal data types that are used by the DMA devices in order to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 180 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 181 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85..f421ec1 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..ff7b70a
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return the number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
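A hypothetical PMD would typically provide a static ops table like the
following (all my_dma_* callbacks are placeholders, not part of this patch):

    static const struct rte_dmadev_ops my_dma_ops = {
        .dev_info_get  = my_dma_info_get,
        .dev_configure = my_dma_configure,
        .dev_start     = my_dma_start,
        .dev_stop      = my_dma_stop,
        .dev_close     = my_dma_close,
        .vchan_setup   = my_dma_vchan_setup,
        .stats_get     = my_dma_stats_get,
        .stats_reset   = my_dma_stats_reset,
    };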
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from primary process, it is used by the secondary process to get
+	 * of the primary process; it is used by the secondary process to get
+	 * the dev_private information.
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing code should
+	 * initialize this field, and copy its value to the 'dev_private'
+	 * field of the 'struct rte_dmadev_data' pointed to by the 'data'
+	 * field.
+	 *
+	 * - In a secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of 'struct
+	 * rte_dmadev_data', which was initialized by the primary process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info which supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v15 3/6] dmadev: introduce DMA device library PMD header
  2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-08-13  9:09   ` Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-13  9:09 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library PMD header, which provides
the driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 +++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1..833baf7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 1358e7d..e693202 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -736,6 +736,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
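A probe-time sketch for a hypothetical driver (the device name, ops table
and private data below are placeholders):

    static int
    my_dma_probe(void)
    {
        struct rte_dmadev *dev;

        dev = rte_dmadev_pmd_allocate("my_dma0");
        if (dev == NULL)
            return -ENOMEM;
        dev->dev_ops = &my_dma_ops;          /* placeholder ops table */
        dev->dev_private = my_dma_priv_data; /* placeholder */
        return 0;
    }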
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 02fffe3..408b93c 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -23,3 +23,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+	global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v15 4/6] dmadev: introduce DMA device library implementation
  2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-08-13  9:09   ` Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 5/6] doc: add DMA device library guide Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 6/6] maintainers: add for dmadev Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-13  9:09 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library implementation, which
includes configuration of and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 567 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 680 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7..d2fc85e 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..80be485
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= info.nb_vchans) {
+		RTE_DMADEV_LOG(ERR, "Device %u vchan out of range!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index e693202..8f3f670 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -793,9 +793,21 @@ struct rte_dmadev_sge {
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -831,10 +843,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -866,9 +891,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -888,8 +925,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   0 on success. Otherwise negative value is returned.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -915,9 +964,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile-time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as a parameter, then the compiler
+	 *   knows the value is NULL.
+	 * - If the address of a local variable is passed as a parameter, then
+	 *   the compiler can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -947,10 +1024,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index ff7b70a..aa8e622 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -177,4 +177,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 408b93c..86c5e75 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -27,6 +27,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v15 5/6] doc: add DMA device library guide
  2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-08-13  9:09   ` Chengwen Feng
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 6/6] maintainers: add for dmadev Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-13  9:09 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds the dmadev library programming guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++++++
 doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst      |   1 +
 3 files changed, 409 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..75bac04
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+====================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices, as well as a generic DMA API which allows
+DMA devices to be managed and configured, and which supports DMA operations to
+be provisioned on DMA poll mode drivers.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The figure above shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (aka hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * The dmadev could create multiple virtual DMA channels, each virtual DMA
+   channel representing a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. For example, an application
+   could create virtual DMA channel 0 for memory-to-memory transfers and
+   virtual DMA channel 1 for memory-to-device transfers.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI BDF
+(bus/bridge, device, function). Specific physical DMA controllers, like other
+physical devices in DPDK, can be listed using the EAL command line options.
+
+Dmadevs are dynamically allocated using the ``rte_dmadev_pmd_allocate`` API,
+based on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
+
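+A minimal configuration sketch is shown below (``dev_id`` is assumed to be a
+valid device identifier prepared by the application; error handling is
+shortened for brevity):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = {
+       .nb_vchans = 1,         /* set up one virtual DMA channel */
+       .enable_silent = false, /* regular (non-silent) mode */
+   };
+
+   if (rte_dmadev_configure(dev_id, &dev_conf) != 0)
+       rte_exit(EXIT_FAILURE, "cannot configure dmadev %u\n", dev_id);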
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, and the source and
+destination device access port parameters.
+
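+As an illustration, below is a sketch of setting up virtual DMA channel 0 for
+memory-to-memory transfers; the descriptor count is an assumed value and must
+lie within the ``min_desc``/``max_desc`` range reported by
+``rte_dmadev_info_get``:
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM, /* memory-to-memory transfers */
+       .nb_desc = 1024,                     /* assumed descriptor ring size */
+   };
+
+   if (rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf) != 0)
+       rte_exit(EXIT_FAILURE, "cannot set up vchan 0 on dmadev %u\n", dev_id);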
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get`` API
+can be used to get the device info and supported features.
+
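+For example, a sketch of querying the device info and verifying that copy
+operations are supported before using the device (``dev_id`` is assumed to be
+a valid device identifier):
+
+.. code-block:: c
+
+   struct rte_dmadev_info info;
+
+   if (rte_dmadev_info_get(dev_id, &info) != 0)
+       rte_exit(EXIT_FAILURE, "cannot get info of dmadev %u\n", dev_id);
+
+   if ((info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY) == 0)
+       rte_exit(EXIT_FAILURE, "dmadev %u lacks copy ops\n", dev_id);
+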
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue a doorbell to the hardware.
+Alternatively, the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to the hardware.
+
+There are two dequeue APIs, ``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``, which are used to obtain the results of the
+enqueue requests. ``rte_dmadev_completed`` will return the number of
+successfully completed operations. ``rte_dmadev_completed_status`` will return
+the number of completed operations along with the status of each operation
+(filled into the ``status`` array passed by the user). These two APIs can also
+return the last completed operation's ``ring_idx``, which can help users track
+operations within their own application-defined rings.
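+
+Putting these APIs together, below is a sketch of a single copy operation
+followed by a busy-poll for its completion. The ``dev_id``, ``vchan``, ``src``,
+``dst`` and ``length`` variables are assumed to be prepared by the application,
+with addresses valid for the configured virtual DMA channel:
+
+.. code-block:: c
+
+   uint16_t last_idx;
+   bool has_error;
+   int ret;
+
+   /* Enqueue a copy and ring the doorbell in the same call. */
+   ret = rte_dmadev_copy(dev_id, vchan, src, dst, length,
+                         RTE_DMA_OP_FLAG_SUBMIT);
+   if (ret < 0)
+       rte_exit(EXIT_FAILURE, "cannot enqueue copy on dmadev %u\n", dev_id);
+
+   /* Busy-poll until the operation completes or an error is reported. */
+   while (rte_dmadev_completed(dev_id, vchan, 1, &last_idx, &has_error) == 0)
+       ;
+
+   if (has_error)
+       rte_exit(EXIT_FAILURE, "copy failed on dmadev %u\n", dev_id);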
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000..157d7eb
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v15 6/6] maintainers: add for dmadev
  2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 5/6] doc: add DMA device library guide Chengwen Feng
@ 2021-08-13  9:09   ` Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-13  9:09 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds myself as the dmadev maintainer and updates the release notes.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                            | 5 +++++
 doc/guides/rel_notes/release_21_11.rst | 6 ++++++
 2 files changed, 11 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 266f5ac..fd9feb1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,11 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d707a55..0d3c38f 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -55,6 +55,12 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provisioning of hardware and software DMA poll mode drivers, defining generic
+  APIs which support a number of different DMA operations.
+
 
 Removed Items
 -------------
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide
  2021-08-10 15:27     ` Walsh, Conor
  2021-08-11  0:47       ` fengchengwen
@ 2021-08-13  9:20       ` fengchengwen
  2021-08-13 10:12         ` Walsh, Conor
  1 sibling, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-08-13  9:20 UTC (permalink / raw)
  To: Walsh, Conor, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin

Hi Conor,

Already sent v15. Most of the feedback has been addressed; unaddressed items are answered inline.

Thanks.

On 2021/8/10 23:27, Walsh, Conor wrote:
> [snip]
> 
> Hi Chengwen,
> I have included some feedback to improve the grammar and readability
> of the docs inline.
> 

[snip]

>> +the EAL command line options.
>> +
>> +And then dmadevs are dynamically allocated by
> 
> ^ Change "And then" to "After DPDK initialization".

It is part of DPDK initialization; I have reworded this differently.

> 
>> rte_dmadev_pmd_allocate() based on
>> +the number of hardware DMA channels.

[snip]

>> +The first API returns the number of operation requests completed
>> successfully,
>> +the second API returns the number of operation requests completed which
>> may
>> +successfully or failed and also with meaningful status code.
> 
> Replace above line with the following:
> "``rte_dmadev_completed`` will return the number of successfully completed operations.
> ``rte_dmadev_completed_status`` will return the total number of completed operations

I removed 'total' because the return value is limited by nb_cpls.

> along with the status of each operation (filled into the ``status`` array passed by user)>
>> Also these two
>> +APIs could return the last completed operation's ring_idx which will help to
>> +track application-defined circular ring.
> 
> Replace the last line with this:
> "These two APIs can also return the last completed operations ``ring_idx`` which
> could help developers track operations within their own application-defined rings."

I changed 'developers' to 'user'.

> 
> With the improvements suggested above,
> Acked-by: Conor Walsh <conor.walsh@intel.com>
> 
> Thanks,
> Conor.
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide
  2021-08-13  9:20       ` fengchengwen
@ 2021-08-13 10:12         ` Walsh, Conor
  0 siblings, 0 replies; 339+ messages in thread
From: Walsh, Conor @ 2021-08-13 10:12 UTC (permalink / raw)
  To: fengchengwen, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin

Hi Chengwen,

v15 looks good to me.

Thanks for the changes,
Conor.

> Hi Conor,
> 
> Already sent v15. Most of the feedback has been addressed; unaddressed items are answered inline.
> 
> Thanks.
> 
> On 2021/8/10 23:27, Walsh, Conor wrote:
> > [snip]
> >
> > Hi Chengwen,
> > I have included some feedback to improve the grammar and readability
> > of the docs inline.
> >
> 
> [snip]
> 
> >> +the EAL command line options.
> >> +
> >> +And then dmadevs are dynamically allocated by
> >
> > ^ Change "And then" to "After DPDK initialization".
> 
> It is part of DPDK initialization; I have reworded this differently.
> 
> >
> >> rte_dmadev_pmd_allocate() based on
> >> +the number of hardware DMA channels.
> 
> [snip]
> 
> >> +The first API returns the number of operation requests completed
> >> successfully,
> >> +the second API returns the number of operation requests completed
> which
> >> may
> >> +successfully or failed and also with meaningful status code.
> >
> > Replace above line with the following:
> > "``rte_dmadev_completed`` will return the number of successfully
> completed operations.
> > ``rte_dmadev_completed_status`` will return the total number of
> completed operations
> 
> I removed 'total' because the return value is limited by nb_cpls.
> 
> > along with the status of each operation (filled into the ``status`` array
> passed by user)>
> >> Also these two
> >> +APIs could return the last completed operation's ring_idx which will help
> to
> >> +track application-defined circular ring.
> >
> > Replace the last line with this:
> > "These two APIs can also return the last completed operations ``ring_idx``
> which
> > could help developers track operations within their own application-
> defined rings."
> 
> I changed 'developers' to 'user'.
> 
> >
> > With the improvements suggested above,
> > Acked-by: Conor Walsh <conor.walsh@intel.com>
> >
> > Thanks,
> > Conor.
> > .
> >

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v15 1/6] dmadev: introduce DMA device library public APIs
  2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-08-19 14:52     ` Bruce Richardson
  2021-08-23  3:43       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-08-19 14:52 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Fri, Aug 13, 2021 at 05:09:29PM +0800, Chengwen Feng wrote:
> The 'dmadevice' is a generic type of DMA device.
> 
> This patch introduces the 'dmadevice' public APIs which expose generic
> operations that can enable configuration and I/O with the DMA devices.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
> ---
one minor comment for clarification

> +/**
> + * rte_dmadev_stats - running statistics.
> + */
> +struct rte_dmadev_stats {
> +	uint64_t submitted_count;
> +	/**< Count of operations which were submitted to hardware. */
> +	uint64_t completed_fail_count;
> +	/**< Count of operations which failed to complete. */
> +	uint64_t completed_count;
> +	/**< Count of operations which successfully complete. */
> +};

The name of the last variable and the comment on it seem mismatched. The
name implies that it's all completed ops, i.e. to get successful only you
do "stats.completed_count - stats.completed_fail_count", while the comment
says that it's successful only. Therefore I suggest:

* We rename the last two vars to "completed_fail" and "completed_success"
  for clarity OR
* We redefine "completed_count" to be the full completed count of both
  success and failure.

I have a slight preference for the latter option, but either can work.

/Bruce

PS: We probably don't need "count" on any of these values; based on the two
options above, I suggest structs as:

  struct rte_dmadev_stats {
	uint64_t submitted;
	uint64_t failed;
	uint64_t successful;
  };

OR:

  struct rte_dmadev_stats {
	uint64_t submitted;
	uint64_t completed;
	uint64_t errors;
  }

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 0/9] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (18 preceding siblings ...)
  2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
@ 2021-08-23  3:31 ` Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 1/9] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (8 more replies)
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
                   ` (9 subsequent siblings)
  29 siblings, 9 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch set contains nine patches to add the new dmadev library.

Chengwen Feng (9):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  dma/skeleton: introduce skeleton dmadev driver
  dma/skeleton: add test cases
  test: enable dmadev skeleton test
  maintainers: add for dmadev

---
v16:
* redefine struct rte_dmadev_stats with fields:
  submitted, completed, errors.
* add dma skeleton.
* add dmadev unit tests.
v15:
* fix typo and readability of prog_guide.
* fix some public API return value comments inconsistent with the impl.
* add return -ENOSPC comment for enqueue failures due to no space.
v14:
* rte_dmadev_vchan_setup add vchan parameter.
* rename max_vchans to nb_vchans of struct rte_dmadev_conf.
* fix dmadev programming guide doxygen warning.
v13:
* add dmadev_i1.svg.
* delete one unnecessary comment line of rte_dmadev_info_get.
v12:
* add max_sges field for struct rte_dmadev_info.
* add more description to dmadev.rst.
* replace scatter with scatter gather in code comment.
* split to six patch.
* fix typo.
v11:
* rename RTE_DMA_STATUS_UNKNOWN to RTE_DMA_STATUS_ERROR_UNKNOWN.
* add RTE_DMA_STATUS_INVALID_ADDR macro.
* update release-note.
* add acked-by for 1/2 patch.
* add dmadev programming guide which is 2/2 patch.

 MAINTAINERS                                 |    7 +
 app/test/meson.build                        |    3 +
 app/test/test_dmadev.c                      |   53 ++
 config/rte_config.h                         |    3 +
 doc/api/doxy-api-index.md                   |    1 +
 doc/api/doxy-api.conf.in                    |    1 +
 doc/guides/prog_guide/dmadev.rst            |  125 ++++
 doc/guides/prog_guide/img/dmadev.svg        |  283 +++++++
 doc/guides/prog_guide/index.rst             |    1 +
 doc/guides/rel_notes/release_21_11.rst      |    6 +
 drivers/dma/meson.build                     |   11 +
 drivers/dma/skeleton/meson.build            |    8 +
 drivers/dma/skeleton/skeleton_dmadev.c      |  621 ++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h      |   76 ++
 drivers/dma/skeleton/skeleton_dmadev_test.c |  521 +++++++++++++
 drivers/dma/skeleton/version.map            |    3 +
 drivers/meson.build                         |    1 +
 lib/dmadev/meson.build                      |    7 +
 lib/dmadev/rte_dmadev.c                     |  567 +++++++++++++++
 lib/dmadev/rte_dmadev.h                     | 1053 +++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h                |  182 +++++
 lib/dmadev/rte_dmadev_pmd.h                 |   72 ++
 lib/dmadev/version.map                      |   36 +
 lib/meson.build                             |    1 +
 24 files changed, 3642 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev_test.c
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 1/9] dmadev: introduce DMA device library public APIs
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 2/9] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (7 subsequent siblings)
  8 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The 'dmadevice' is a generic type of DMA device.

> This patch introduces the 'dmadevice' public APIs which expose generic
operations that can enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
---
 doc/api/doxy-api-index.md |   1 +
 doc/api/doxy-api.conf.in  |   1 +
 lib/dmadev/meson.build    |   4 +
 lib/dmadev/rte_dmadev.h   | 957 ++++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map    |  25 ++
 lib/meson.build           |   1 +
 6 files changed, 989 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..a44a92b 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..6d5bd85
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..a008ee0
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,957 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, each virtual DMA
+ * channel representing a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. For example, an application
+ * could create virtual DMA channel 0 for the memory-to-memory transfer
+ * scenario and virtual DMA channel 1 for the memory-to-device transfer
+ * scenario.
+ *
+ * Dmadevs are dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
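+ *
+ * For example, a minimal setup sequence might look as follows (a sketch with
+ * illustrative values; error handling omitted; dev_id is the application's
+ * device identifier):
+ *
+ * \code{.c}
+ *     struct rte_dmadev_conf dev_conf = { .nb_vchans = 1 };
+ *     struct rte_dmadev_vchan_conf vchan_conf = {
+ *         .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *         .nb_desc = 1024,
+ *     };
+ *
+ *     rte_dmadev_configure(dev_id, &dev_conf);
+ *     rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf);
+ *     rte_dmadev_start(dev_id);
+ * \endcode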
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively, a
+ * flag (@see RTE_DMA_OP_FLAG_SUBMIT) can be passed to the first three APIs to
+ * do the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - returns the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - returns the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not need to invoke the above two completion APIs.
+ *
+ * About the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows:
+ *     - The ring_idx values for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation; the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again; the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation; the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation; the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation; the ring_idx returned is 0
+ *     - ...
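+ *
+ * For instance, a sketch of tracking per-operation metadata keyed by the
+ * ring_idx (the ring size below is an assumed power of two that is at least
+ * nb_desc; dev_id, vchan, src, dst and len are application variables):
+ *
+ * \code{.c}
+ *     uint64_t cookies[4096]; // power-of-two ring, >= nb_desc
+ *
+ *     int ret = rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
+ *     if (ret >= 0)
+ *         cookies[ret & 4095] = (uint64_t)src; // e.g. remember the source
+ * \endcode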
+ *
+ * The DMA operation address used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) is defined as rte_iova_t type. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the device index refers to a valid DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - true if the device index is valid, false otherwise.
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using VA as the DMA address.
+ * If the device supports SVA, then the application can pass any VA address,
+ * such as memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
+ * If the device does not support SVA, then the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32 so as to leave a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t nb_vchans;
+	/**< The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info, which is obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false - default mode, true - silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to device (the host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from device
+	 * (the host memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from device
+	 * (one host's memory) to device (another host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Describes the device access port parameters in the
+	 * device-to-memory transfer scenario.
+	 * 2) Describes the source device access port parameters in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Describes the device access port parameters in the
+	 * memory-to-device transfer scenario.
+	 * 2) Describes the destination device access port parameters in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
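+
+/* Example: a minimal mem-to-mem virtual channel configuration (an
+ * illustrative sketch only; the nb_desc value is arbitrary and must lie
+ * within the device's reported [min_desc, max_desc] range):
+ *
+ *	struct rte_dmadev_vchan_conf conf = {
+ *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *		.nb_desc = 1024,
+ *	};
+ *	ret = rte_dmadev_vchan_setup(dev_id, 0, &conf);
+ */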
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dmadev_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed;
+	/**< Count of operations which were completed. */
+	uint64_t errors;
+	/**< Count of operations which failed to complete. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
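+
+/* Example: reading aggregate statistics across all virtual channels (an
+ * illustrative sketch; error handling omitted):
+ *
+ *	struct rte_dmadev_stats stats;
+ *	ret = rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+ */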
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, statistics for all channels are returned.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, statistics for all channels are reset.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger the dmadev self test.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0: selftest successful.
+ *   - -ENOTSUP: if the device doesn't support selftest.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_selftest(uint16_t dev_id);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete because it was aborted by the
+	 * user. This is mainly used when processing dev_stop: the user can
+	 * modify the descriptors (e.g. change one bit to tell the hardware to
+	 * abort this job), which allows outstanding requests to complete as
+	 * much as possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation was not attempted, in the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed nonetheless, so the status of the not-attempted jobs is
+	 * reported before the status of those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source address. */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address. This covers the case where only an address
+	 * error is known, but not which of the two addresses is wrong.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * A DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device/device-to-memory/
+	 * device-to-device transfer scenarios.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * An operation with this flag must be processed only after all previous
+ * operations have completed.
+ * If the specified DMA HW works in order (i.e. it has an implicit fence
+ * between operations), this flag may be a no-op.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * An operation with this flag also issues the doorbell to hardware after
+ * the job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write the DMA data into the low-level cache.
+ * Used for performance optimization; this is just a hint, there is no
+ * capability bit for it, and a driver must not return an error if this flag
+ * is set.
+ */
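+
+/* Example: enqueue a batch of copies without per-operation doorbells, then
+ * ring the doorbell once (an illustrative sketch; 'n' and the src/dst
+ * arrays are assumptions):
+ *
+ *	for (i = 0; i < n; i++)
+ *		rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len, 0);
+ *	rte_dmadev_submit(dev_id, vchan);
+ */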
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
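+ *
+ * A minimal usage sketch (illustrative only; it assumes the device and
+ * vchan are already configured and started):
+ * @code
+ *	ret = rte_dmadev_copy(dev_id, vchan, src_iova, dst_iova, length,
+ *			      RTE_DMA_OP_FLAG_SUBMIT);
+ * @endcode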
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is triggered to begin this operation; otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
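+ * A minimal polling sketch (an illustrative usage only; it assumes
+ * operations were previously enqueued and submitted on this vchan):
+ * @code
+ *	uint16_t last_idx;
+ *	bool has_error;
+ *	uint16_t n = rte_dmadev_completed(dev_id, vchan, 32,
+ *					  &last_idx, &has_error);
+ * @endcode
+ *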
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if a transfer error has occurred.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed, whether
+ * successfully or with an error.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number n is greater than zero, then the first n entries of the
+ *   status array are also set.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..02fffe3
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,25 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_selftest;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..a542c23 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 2/9] dmadev: introduce DMA device library internal header
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 1/9] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 3/9] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (6 subsequent siblings)
  8 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library internal header, which contains
internal data types that are used by the DMA devices in order to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 180 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 181 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85..f421ec1 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..ff7b70a
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_selftest_t)(uint16_t dev_id);
+/**< @internal Used to start dmadev selftest. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return the number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+	rte_dmadev_selftest_t dev_selftest;
+};
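+
+/* A PMD would typically expose its callbacks through a static ops table
+ * such as the following (an illustrative sketch; the my_dma_* callbacks
+ * are hypothetical driver functions):
+ *
+ *	static const struct rte_dmadev_ops my_dma_ops = {
+ *		.dev_info_get = my_dma_info_get,
+ *		.dev_configure = my_dma_configure,
+ *		.dev_start = my_dma_start,
+ *		.dev_stop = my_dma_stop,
+ *	};
+ */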
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from primary process, it is used by the secondary process to get
+	 * dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointers
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data', which is pointed to by the
+	 * 'data' field.
+	 *
+	 * - In a secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of
+	 * 'struct rte_dmadev_data', which was initialized by the primary
+	 * process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 3/9] dmadev: introduce DMA device library PMD header
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 1/9] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 2/9] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 4/9] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (5 subsequent siblings)
  8 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library PMD header, which provides
the driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 +++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1..833baf7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index a008ee0..0744afa 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -736,6 +736,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
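+
+/* Typical probe-time usage (an illustrative sketch; 'my_priv' and
+ * 'my_dma_ops' are hypothetical driver objects):
+ *
+ *	struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ *	if (dev == NULL)
+ *		return -ENOMEM;
+ *	dev->dev_private = my_priv;
+ *	dev->dev_ops = &my_dma_ops;
+ */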
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 02fffe3..408b93c 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -23,3 +23,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+        global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 4/9] dmadev: introduce DMA device library implementation
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 3/9] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 5/9] doc: add DMA device library guide Chengwen Feng
                     ` (4 subsequent siblings)
  8 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library implementation, which includes
configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 567 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 680 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7..d2fc85e 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..80be485
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= info.nb_vchans) {
+		RTE_DMADEV_LOG(ERR, "Device %u vchan out range!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
+
+int
+rte_dmadev_selftest(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_selftest, -ENOTSUP);
+	return (*dev->dev_ops->dev_selftest)(dev_id);
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 0744afa..cf9e4bf 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -793,9 +793,21 @@ struct rte_dmadev_sge {
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -831,10 +843,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -866,9 +891,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -888,8 +925,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   0 on success. Otherwise negative value is returned.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -915,9 +964,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these checks should be evaluated at compile time,
+	 * since this is an inline function.
+	 * - If NULL is explicitly passed as a parameter, then the compiler
+	 *   knows the value is NULL.
+	 * - If the address of a local variable is passed as a parameter, then
+	 *   the compiler can know it is non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -947,10 +1024,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index ff7b70a..aa8e622 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -177,4 +177,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 408b93c..86c5e75 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -27,6 +27,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 5/9] doc: add DMA device library guide
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 4/9] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 6/9] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
                     ` (3 subsequent siblings)
  8 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds the dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++++++
 doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst      |   1 +
 3 files changed, 409 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..75bac04
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+====================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices, as well as a generic DMA API which allows
+DMA devices to be managed and configured, and supports DMA operations to be
+provisioned on a DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (a.k.a. hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+   channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. E.g. an application could create
+   virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+   virtual DMA channel 1 for the memory-to-device transfer scenario.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI BDF
+(bus/bridge, device, function). Specific physical DMA controllers, like
+other physical devices in DPDK, can be listed using the EAL command line
+options.
+
+The dmadevs are dynamically allocated using the ``rte_dmadev_pmd_allocate``
+API, based on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
+
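+As an illustrative sketch (not taken from a driver), configuring a device
+with a single virtual DMA channel might look like:
+
+.. code-block:: c
+
+   struct rte_dmadev_conf conf = { .nb_vchans = 1 };
+
+   if (rte_dmadev_configure(dev_id, &conf) < 0)
+       rte_exit(EXIT_FAILURE, "cannot configure dmadev %u\n", dev_id);
+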
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel: for example, the transfer direction,
+the number of descriptors for the channel, and the source and destination
+device access port parameters. An illustrative setup is shown below.
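+
+As an illustrative sketch, setting up virtual channel 0 for memory-to-memory
+copies (the descriptor count below is arbitrary and must fall within the
+device's reported ``min_desc``/``max_desc`` range):
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vconf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 1024,
+   };
+
+   if (rte_dmadev_vchan_setup(dev_id, 0, &vconf) < 0)
+       rte_exit(EXIT_FAILURE, "vchan setup failed\n");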
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue the doorbell to hardware.
+Alternatively, the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the
+enqueue APIs to issue the doorbell as part of the enqueue operation.
+
+There are two dequeue APIs, ``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``, which are used to obtain the results of the
+enqueue requests. ``rte_dmadev_completed`` returns the number of successfully
+completed operations. ``rte_dmadev_completed_status`` returns the number of
+completed operations along with the status of each operation (filled into the
+``status`` array passed by the user). These two APIs can also return the last
+completed operation's ``ring_idx``, which helps the user track operations
+within their own application-defined rings.
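+
+The following sketch shows a typical dataplane flow, assuming a started device
+with virtual DMA channel 0 configured for memory-to-memory copies (``src``,
+``dst`` and ``length`` are application-provided):
+
+.. code-block:: c
+
+   int ring_idx = rte_dmadev_copy(dev_id, 0, src, dst, length,
+                                  RTE_DMA_OP_FLAG_SUBMIT);
+   if (ring_idx < 0)
+       return ring_idx; /* enqueue failed, e.g. no free descriptor */
+
+   /* ... later, poll for the completion ... */
+   uint16_t last_idx;
+   bool has_error;
+   uint16_t nb_done = rte_dmadev_completed(dev_id, 0, 1, &last_idx,
+                                           &has_error);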
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000..157d7eb
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 6/9] dma/skeleton: introduce skeleton dmadev driver
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 5/9] doc: add DMA device library guide Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  2021-08-26 18:39     ` Bruce Richardson
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 7/9] dma/skeleton: add test cases Chengwen Feng
                     ` (2 subsequent siblings)
  8 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The skeleton dmadev driver, along the lines of the rawdev skeleton, is for
showcasing the dmadev library. This driver implements a cpucopy 'DMA',
so that a test module can be developed on top of it.

The design of the skeleton involves a virtual device which is plugged into
the VDEV bus on initialization.

This patch also enables compilation of the dmadev skeleton driver.
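
For example (an illustrative invocation), the driver can be instantiated
from the EAL command line of any DPDK binary:

	--vdev="dma_skeleton,lcore=5"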

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 drivers/dma/meson.build                |  11 +
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 595 +++++++++++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  75 +++++
 drivers/dma/skeleton/version.map       |   3 +
 drivers/meson.build                    |   1 +
 6 files changed, 692 insertions(+)
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000..0c2c34c
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+if is_windows
+    subdir_done()
+endif
+
+drivers = [
+        'skeleton',
+]
+std_deps = ['dmadev']
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000..27509b1
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000..b3ab4a0
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,595 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_ring.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+/* Count of instances */
+static uint16_t skeldma_init_once;
+
+static int
+skeldma_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	128
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM |
+			     RTE_DMADEV_CAPA_SVA |
+			     RTE_DMADEV_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dmadev *dev, const struct rte_dmadev_conf *conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dmadev *dev = (struct rte_dmadev *)param;
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			/* Reset on wrap-around so the sleep below keeps
+			 * firing during a long idle run.
+			 */
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_ERR("Vchan was not setup, start fail!\n");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, include:
+	 * 1) fflush pending/running/completed ring to empty ring.
+	 * 2) init ring idx to zero.
+	 * 3) init running statistics.
+	 * 4) mark cpucopy task exit_flag to false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_ERR("Start cpucopy thread fail!\n");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_WARN("Set thread affinity lcore = %u fail!\n",
+				     hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skeleton_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_ERR("Malloc dma skeleton desc fail!\n");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_ERR("Create dma skeleton desc ring fail!\n");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a free ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dmadev *dev)
+{
+	/* The device already stopped */
+	vchan_release(dev->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dmadev *dev, uint16_t vchan,
+		    const struct rte_dmadev_vchan_conf *conf)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_ERR("Number of desc must be power of 2!\n");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dmadev *dev, uint16_t vchan,
+		  struct rte_dmadev_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dmadev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->dev_private;
+
+	fprintf(f,
+		"  lcore_id: %d\n"
+		"  socket_id: %d\n"
+		"  desc_empty_ring_count: %u\n"
+		"  desc_pending_ring_count: %u\n"
+		"  desc_running_ring_count: %u\n"
+		"  desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	fprintf(f,
+		"  next_ring_idx: %u\n"
+		"  submitted_count: %" PRIu64 "\n"
+		"  completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(struct rte_dmadev *dev, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)src;
+	desc->dst = (void *)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx++;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return 0;
+}
+
+static int
+skeldma_submit(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(struct rte_dmadev *dev,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(struct rte_dmadev *dev,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dmadev_ops skeldma_ops = {
+	.dev_info_get = skeldma_info_get,
+	.dev_configure = skeldma_configure,
+	.dev_start = skeldma_start,
+	.dev_stop = skeldma_stop,
+	.dev_close = skeldma_close,
+
+	.vchan_setup = skeldma_vchan_setup,
+
+	.stats_get = skeldma_stats_get,
+	.stats_reset = skeldma_stats_reset,
+
+	.dev_dump = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dmadev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	dev = rte_dmadev_pmd_allocate(name);
+	if (dev == NULL) {
+		SKELDMA_ERR("Unable to allocate dmadev: %s\n", name);
+		return -EINVAL;
+	}
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev->dev_private = rte_zmalloc_socket("dmadev private",
+					      sizeof(struct skeldma_hw),
+					      RTE_CACHE_LINE_SIZE,
+					      socket_id);
+	if (!dev->dev_private) {
+		SKELDMA_ERR("Unable to allocate device private memory\n");
+		(void)rte_dmadev_pmd_release(dev);
+		return -ENOMEM;
+	}
+
+	dev->copy = skeldma_copy;
+	dev->submit = skeldma_submit;
+	dev->completed = skeldma_completed;
+	dev->completed_status = skeldma_completed_status;
+	dev->dev_ops = &skeldma_ops;
+	dev->device = &vdev->device;
+
+	hw = dev->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	dev = rte_dmadev_get_device_by_name(name);
+	if (!dev)
+		return -EINVAL;
+
+	ret = rte_dmadev_close(dev->data->dev_id);
+	if (ret)
+		return ret;
+
+	rte_free(dev->dev_private);
+	dev->dev_private = NULL;
+	(void)rte_dmadev_pmd_release(dev);
+
+	return 0;
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+
+	SKELDMA_INFO("Parse lcore_id = %d\n", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_ERR("Multiple process not supported for %s\n", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_init_once) {
+		SKELDMA_ERR("Multiple instance not supported for %s\n", name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_INFO("Create %s dmadev lcore-id %d\n", name, lcore_id);
+		/* Device instance created; Second instance not possible */
+		skeldma_init_once = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_init_once = 0;
+		SKELDMA_INFO("Remove %s dmadev\n", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000..6495653
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef __SKELETON_DMADEV_H__
+#define __SKELETON_DMADEV_H__
+
+#include <rte_dmadev.h>
+
+extern int skeldma_logtype;
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "", \
+		__func__, ##args)
+
+#define SKELDMA_DEBUG(fmt, args...) \
+	SKELDMA_LOG(DEBUG, fmt, ## args)
+#define SKELDMA_INFO(fmt, args...) \
+	SKELDMA_LOG(INFO, fmt, ## args)
+#define SKELDMA_WARN(fmt, args...) \
+	SKELDMA_LOG(WARNING, fmt, ## args)
+#define SKELDMA_ERR(fmt, args...) \
+	SKELDMA_LOG(ERR, fmt, ## args)
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+int test_dma_skeleton(uint16_t dev_id);
+
+#endif /* __SKELETON_DMADEV_H__ */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000..c2e0723
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
diff --git a/drivers/meson.build b/drivers/meson.build
index bc6f4f5..383f648 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 7/9] dma/skeleton: add test cases
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 6/9] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  2021-08-23 14:03     ` Bruce Richardson
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 8/9] test: enable dmadev skeleton test Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 9/9] maintainers: add for dmadev Chengwen Feng
  8 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces dmadev unit test cases for validation against the
skeleton dmadev PMD implementation.

The test cases are added along with the skeleton driver implementation.
They can be enabled by passing a vdev argument to any DPDK binary:

	--vdev="dma_skeleton,selftest=1"

If 'selftest=1' is not provided, the test cases are not executed, but the
vdev is still available for application use.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 drivers/dma/skeleton/meson.build            |   1 +
 drivers/dma/skeleton/skeleton_dmadev.c      |  34 +-
 drivers/dma/skeleton/skeleton_dmadev.h      |   1 +
 drivers/dma/skeleton/skeleton_dmadev_test.c | 521 ++++++++++++++++++++++++++++
 4 files changed, 553 insertions(+), 4 deletions(-)
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev_test.c

diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
index 27509b1..5d47339 100644
--- a/drivers/dma/skeleton/meson.build
+++ b/drivers/dma/skeleton/meson.build
@@ -4,4 +4,5 @@
 deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
 sources = files(
         'skeleton_dmadev.c',
+        'skeleton_dmadev_test.c',
 )
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
index b3ab4a0..1707e88 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.c
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -430,6 +430,7 @@ static const struct rte_dmadev_ops skeldma_ops = {
 	.stats_reset = skeldma_stats_reset,
 
 	.dev_dump = skeldma_dump,
+	.dev_selftest = test_dma_skeleton,
 };
 
 static int
@@ -503,11 +504,24 @@ skeldma_parse_lcore(const char *key __rte_unused,
 	return 0;
 }
 
+static int
+skeldma_parse_selftest(const char *key __rte_unused,
+		       const char *value,
+		       void *opaque)
+{
+	int flag = atoi(value);
+	if (flag == 0 || flag == 1)
+		*(int *)opaque = flag;
+	return 0;
+}
+
 static void
-skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev,
+			int *lcore_id, int *selftest)
 {
 	static const char *const args[] = {
 		SKELDMA_ARG_LCORE,
+		SKELDMA_ARG_SELFTEST,
 		NULL
 	};
 
@@ -524,8 +538,11 @@ skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
 
 	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
 				 skeldma_parse_lcore, lcore_id);
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_SELFTEST,
+				 skeldma_parse_selftest, selftest);
 
-	SKELDMA_INFO("Parse lcore_id = %d\n", *lcore_id);
+	SKELDMA_INFO("Parse lcore_id = %d selftest = %d\n",
+		     *lcore_id, *selftest);
 
 	rte_kvargs_free(kvlist);
 }
@@ -535,6 +552,7 @@ skeldma_probe(struct rte_vdev_device *vdev)
 {
 	const char *name;
 	int lcore_id = -1;
+	int selftest = 0;
 	int ret;
 
 	name = rte_vdev_device_name(vdev);
@@ -552,10 +570,17 @@ skeldma_probe(struct rte_vdev_device *vdev)
 		return -EINVAL;
 	}
 
-	skeldma_parse_vdev_args(vdev, &lcore_id);
+	skeldma_parse_vdev_args(vdev, &lcore_id, &selftest);
 
 	ret = skeldma_create(name, vdev, lcore_id);
 	if (ret >= 0) {
+		/* Run the selftest if the 'selftest' vdev argument was set;
+		 * if invalid arguments were passed, execution continues
+		 * without selftest.
+		 */
+		if (selftest)
+			(void)test_dma_skeleton(ret);
+
 		SKELDMA_INFO("Create %s dmadev lcore-id %d\n", name, lcore_id);
 		/* Device instance created; Second instance not possible */
 		skeldma_init_once = 1;
@@ -592,4 +617,5 @@ static struct rte_vdev_driver skeldma_pmd_drv = {
 RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
 RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
-		SKELDMA_ARG_LCORE "=<uint16> ");
+		SKELDMA_ARG_LCORE "=<uint16> "
+		SKELDMA_ARG_SELFTEST "=<0|1> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
index 6495653..e8a310d 100644
--- a/drivers/dma/skeleton/skeleton_dmadev.h
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -22,6 +22,7 @@ extern int skeldma_logtype;
 	SKELDMA_LOG(ERR, fmt, ## args)
 
 #define SKELDMA_ARG_LCORE	"lcore"
+#define SKELDMA_ARG_SELFTEST	"selftest"
 
 struct skeldma_desc {
 	void *src;
diff --git a/drivers/dma/skeleton/skeleton_dmadev_test.c b/drivers/dma/skeleton/skeleton_dmadev_test.c
new file mode 100644
index 0000000..be56f07
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev_test.c
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+
+/* Using relative path as skeleton_dmadev is not part of exported headers */
+#include "skeleton_dmadev.h"
+
+#define SKELDMA_TEST_DEBUG(fmt, args...) \
+	SKELDMA_LOG(DEBUG, fmt, ## args)
+#define SKELDMA_TEST_INFO(fmt, args...) \
+	SKELDMA_LOG(INFO, fmt, ## args)
+
+#define SKELDMA_TEST_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static uint16_t test_dev_id;
+static uint16_t invalid_dev_id;
+
+static int total;
+static int passed;
+static int failed;
+static char *src;
+static char *dst;
+
+static int
+testsuite_setup(uint16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL) {
+		rte_free(src);
+		src = NULL;
+		return -ENOMEM;
+	}
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	rte_free(dst);
+	/* Ensure the dmadev is stopped. */
+	rte_dmadev_stop(test_dev_id);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			SKELDMA_TEST_INFO("%s Failed", name);
+		} else {
+			passed++;
+			SKELDMA_TEST_DEBUG("%s Passed", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dmadev_get_dev_id(void)
+{
+	int ret = rte_dmadev_get_dev_id("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dmadev_is_valid_dev(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dmadev_is_valid_dev(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_count(void)
+{
+	uint16_t count = rte_dmadev_count();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_info_get(void)
+{
+	struct rte_dmadev_info info =  { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_configure(void)
+{
+	struct rte_dmadev_conf conf = { 0 };
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configure nb_vchans not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_vchan_setup(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_vchan_setup(test_dev_id, dev_conf.nb_vchans,
+				     &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.src_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_start_stop(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup when device started */
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret);
+
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_stats(void)
+{
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dmadev_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, RTE_DMADEV_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret, i;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check enqueue without submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+	/* Check add submit */
+	ret = rte_dmadev_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Check do completed status again */
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to completed status");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dma_skeleton(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		SKELDMA_TEST_INFO("testsuite setup fail!");
+		return -1;
+	}
+
+	/* Each testcase that exits successfully must ensure that the test
+	 * dmadev still exists and that it is in the stopped state.
+	 */
+	SKELDMA_TEST_RUN(test_dmadev_get_dev_id);
+	SKELDMA_TEST_RUN(test_dmadev_is_valid_dev);
+	SKELDMA_TEST_RUN(test_dmadev_count);
+	SKELDMA_TEST_RUN(test_dmadev_info_get);
+	SKELDMA_TEST_RUN(test_dmadev_configure);
+	SKELDMA_TEST_RUN(test_dmadev_vchan_setup);
+	SKELDMA_TEST_RUN(test_dmadev_start_stop);
+	SKELDMA_TEST_RUN(test_dmadev_stats);
+	SKELDMA_TEST_RUN(test_dmadev_completed);
+	SKELDMA_TEST_RUN(test_dmadev_completed_status);
+
+	testsuite_teardown();
+
+	SKELDMA_TEST_INFO("Total tests   : %d\n", total);
+	SKELDMA_TEST_INFO("Passed        : %d\n", passed);
+	SKELDMA_TEST_INFO("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 8/9] test: enable dmadev skeleton test
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
                     ` (6 preceding siblings ...)
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 7/9] dma/skeleton: add test cases Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 9/9] maintainers: add for dmadev Chengwen Feng
  8 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The skeleton dmadev test cases are part of the driver layer. This patch
allows the test cases to be executed using the 'dmadev_autotest' command
in the test framework.
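
For example (build paths are illustrative), the test can be run from the
interactive test binary:

	$ ./build/app/test/dpdk-test
	RTE>>dmadev_autotest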

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 app/test/meson.build   |  3 +++
 app/test/test_dmadev.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 56 insertions(+)
 create mode 100644 app/test/test_dmadev.c

diff --git a/app/test/meson.build b/app/test/meson.build
index a761168..881cb4f 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -43,6 +43,7 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -162,6 +163,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -333,6 +335,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000..90e8faa
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+
+static int
+test_dmadev_selftest_skeleton(void)
+{
+	const char *pmd = "dma_skeleton";
+	int ret;
+
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	rte_vdev_init(pmd, NULL);
+	ret = rte_dmadev_selftest(rte_dmadev_get_dev_id(pmd));
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dmadev_selftests(void)
+{
+	const int count = rte_dmadev_count();
+	int ret = 0;
+	int i;
+
+	/* basic sanity on dmadev infrastructure */
+	if (test_dmadev_selftest_skeleton() < 0)
+		return -1;
+
+	/* now run self-test on all dmadevs */
+	if (count > 0)
+		printf("\n### Run selftest on each available dmadev\n");
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state != RTE_DMADEV_ATTACHED)
+			continue;
+		int result = rte_dmadev_selftest(i);
+		printf("dmadev %u (%s) selftest: %s\n", i,
+			rte_dmadevices[i].data->dev_name,
+			result == 0 ? "Passed" : "Failed");
+		ret |= result;
+	}
+
+	return ret;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dmadev_selftests);
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v16 9/9] maintainers: add for dmadev
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
                     ` (7 preceding siblings ...)
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 8/9] test: enable dmadev skeleton test Chengwen Feng
@ 2021-08-23  3:31   ` Chengwen Feng
  8 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-23  3:31 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds myself as the dmadev maintainer and updates the release
notes.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                            | 7 +++++++
 doc/guides/rel_notes/release_21_11.rst | 6 ++++++
 2 files changed, 13 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 266f5ac..1661428 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,13 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: drivers/dma/skeleton/
+F: app/test/test_dmadev.c
+F: doc/guides/prog_guide/dmadev.rst
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d707a55..0d3c38f 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -55,6 +55,12 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provisioning of hardware and software DMA poll mode drivers, defining generic
+  APIs which support a number of different DMA operations.
+
 
 Removed Items
 -------------
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v15 1/6] dmadev: introduce DMA device library public APIs
  2021-08-19 14:52     ` Bruce Richardson
@ 2021-08-23  3:43       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-08-23  3:43 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

Hi Bruce and All,

The v16 uses the following definition, and it also includes the DMA
skeleton driver and unit tests:
   struct rte_dmadev_stats {
 	uint64_t submitted;
 	uint64_t completed;
 	uint64_t errors;
   };
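
With this definition 'completed' counts both successful and failed
operations, so the application derives the success count, e.g. (sketch):

	struct rte_dmadev_stats st;
	rte_dmadev_stats_get(dev_id, vchan, &st);
	uint64_t successful = st.completed - st.errors;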

Feedback welcome, thanks.

On 2021/8/19 22:52, Bruce Richardson wrote:
> On Fri, Aug 13, 2021 at 05:09:29PM +0800, Chengwen Feng wrote:
>> The 'dmadevice' is a generic type of DMA device.
>>
>> This patch introduce the 'dmadevice' public APIs which expose generic
>> operations that can enable configuration and I/O with the DMA devices.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
>> ---
> one minor comment for clarification
> 
>> +/**
>> + * rte_dmadev_stats - running statistics.
>> + */
>> +struct rte_dmadev_stats {
>> +	uint64_t submitted_count;
>> +	/**< Count of operations which were submitted to hardware. */
>> +	uint64_t completed_fail_count;
>> +	/**< Count of operations which failed to complete. */
>> +	uint64_t completed_count;
>> +	/**< Count of operations which successfully complete. */
>> +};
> 
> The name of the last variable and the comment on it seem mismatched. The
> name implies that it's all completed ops, i.e. to get successful only you
> do "stats.completed_count - stats.completed_fail_count", while the comment
> says that it's successful only. Therefore I suggest:
> 
> * We rename the last two vars to "completed_fail" and "completed_success"
>   for clarity OR
> * We redefine "completed_count" to be the full completed count of both
>   success and failure.
> 
> I have a slight preference for the latter option, but either can work.
> 
> /Bruce
> 
> PS: We probably don't need "count" on any of these values, based on two
> options above suggest structs as:
> 
>   struct rte_dmadev_stats {
> 	uint64_t submitted;
> 	uint64_t failed;
> 	uint64_t successful;
>   };
> 
> OR:
> 
>   struct rte_dmadev_stats {
> 	uint64_t submitted;
> 	uint64_t completed;
> 	uint64_t errors;
>   }
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v16 7/9] dma/skeleton: add test cases
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 7/9] dma/skeleton: add test cases Chengwen Feng
@ 2021-08-23 14:03     ` Bruce Richardson
  2021-08-26  9:30       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-08-23 14:03 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Mon, Aug 23, 2021 at 11:31:32AM +0800, Chengwen Feng wrote:
> Patch introduces dmadev unit testcase for validation against the
> skeleton dmadev PMD implementation.
> 
> Test cases are added along with the skeleton driver implementation.
> It can be enabled by using vdev argument to any DPDK binary:
> 
> 	--vdev="dma_skeleton,selftest=1"
> 
> In case 'selftest=1' is not provided, autotest doesn't execute the
> test cases but the vdev is still available for application use.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---

Having self-tests was useful for rawdev implementations as each rawdev was
(potentially) a completely different device type so no common set of tests
could really be written for them. However, for dmadev, we have a common set
of functions and APIs, so I think that the tests provided here should be in
a general test-case set.

We are preparing to upstream a test suite for DMA devices, based off the
work we have in progress on our own drivers, and we'll look to include some
of the tests from here in that for consistency.  Please wait for that set -
hopefully appearing in the next day or two - before doing additional work on
this set, as our set includes quite a comprehensive set of functional tests
in it, which would go well with the API tests here.

In terms of the skeleton dmadev itself, I'm not fully convinced of its
usefulness, but it does allow unit testing of the APIs in the absence of
dma device hardware. Whether that utility is worth the maintenance cost,
though, I'm not sure.

Regards,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v16 7/9] dma/skeleton: add test cases
  2021-08-23 14:03     ` Bruce Richardson
@ 2021-08-26  9:30       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-08-26  9:30 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The skeleton_dma driver mainly focuses on DMA framework unit tests, as it
currently functions.

Agreed on adding more general testcases which run with different hardware
drivers; these testcases would mainly be used to test the drivers (not the
DMA framework). In this way, the drivers' selftest ops can be implemented
very easily.


On 2021/8/23 22:03, Bruce Richardson wrote:
> On Mon, Aug 23, 2021 at 11:31:32AM +0800, Chengwen Feng wrote:
>> Patch introduces dmadev unit testcase for validation against the
>> skeleton dmadev PMD implementation.
>>
>> Test cases are added along with the skeleton driver implementation.
>> It can be enabled by using vdev argument to any DPDK binary:
>>
>> 	--vdev="dma_skeleton,selftest=1"
>>
>> In case 'selftest=1' is not provided, autotest doesn't execute the
>> test cases but the vdev is still available for application use.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> ---
> 
> Having self-tests was useful for rawdev implementations as each rawdev was
> (potentially) a completely different device type so no common set of tests
> could really be written for them. However, for dmadev, we have a common set
> of functions and APIs, so I think that the tests provided here should be in
> a general test-case set.
> 
> We are preparing to upstream a test suite for DMA devices, based off the
> work we have in progress on our own drivers, and we'll look to include some
> of the tests from here in that for consistency.  Please wait for that set -
> hopefully appearing the next day or two - before doing additional work on
> this set, as our set includes quite a comprehensive set of functional tests
> in it, which would go well with the API tests here.
> 
> In terms of the skeleton dmadev itself, I'm not fully convinced of its
> usefulness, but it does allow unit testing of the APIs in the absense of
> dma device hardware. Whether that utility is worth the maintenance cost,
> though, I'm not sure.
> 
> Regards,
> /Bruce
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v16 6/9] dma/skeleton: introduce skeleton dmadev driver
  2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 6/9] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-08-26 18:39     ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-08-26 18:39 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On Mon, Aug 23, 2021 at 11:31:31AM +0800, Chengwen Feng wrote:
> Skeleton dmadevice driver, on the lines of rawdev skeleton, is for
> showcasing of the dmadev library. This driver implements cpucopy 'DMA',
> so that a test module can be developed.
> 
> Design of skeleton involves a virtual device which is plugged into VDEV
> bus on initialization.
> 
> Also, enable compilation of dmadev skeleton drivers.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---

FYI: When running some compilation testing on my test-case patches [1], I had
build errors flagged in this driver, when building for 32-bit.

../../drivers/dma/skeleton/skeleton_dmadev.c: In function ‘skeldma_copy’:
../../drivers/dma/skeleton/skeleton_dmadev.c:349:14: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
  349 |  desc->src = (void *)src;
      |              ^
../../drivers/dma/skeleton/skeleton_dmadev.c:350:14: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
  350 |  desc->dst = (void *)dst;
      |              ^
cc1: all warnings being treated as errors
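
A common way to avoid this on 32-bit builds - sketched here assuming the
descriptor fields stay as pointers - is to cast through uintptr_t:

	desc->src = (void *)(uintptr_t)src;
	desc->dst = (void *)(uintptr_t)dst;

though that truncates IOVAs above 4G on 32-bit, so storing the rte_iova_t
values directly may be the better fix.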


Regards,
/Bruce

[1] http://patches.dpdk.org/project/dpdk/list/?series=18477

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 0/8] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (19 preceding siblings ...)
  2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
@ 2021-08-28  7:29 ` Chengwen Feng
  2021-08-28  7:29   ` [dpdk-dev] [PATCH v17 1/8] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (7 more replies)
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
                   ` (8 subsequent siblings)
  29 siblings, 8 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch set contains eight patches adding the new dmadev library.

Chengwen Feng (8):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test
  maintainers: add for dmadev

---
v17:
* remove rte_dmadev_selftest() API.
* move dmadev API test from dma/skeleton to app/test.
* fix compile error of dma/skeleton driver when building for x86-32.
* fix iol spell check warning of dmadev.rst.
v16:
* redefine struct rte_dmadev_stats with fields:
  submitted, completed, errors.
* add dma skeleton.
* add dmadev ut.
v15:
* fix typo and readability of prog_guide.
* fix some public APIs return value comment inconsistent with the impl.
* add return -ENOSPC comment if enqueue fail due to no space.
v14:
* rte_dmadev_vchan_setup add vchan parameter.
* rename max_vchans to nb_vchans of struct rte_dmadev_conf.
* fix dmadev programming guide doxygen warning.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   45 ++
 app/test/test_dmadev_api.c             |  517 ++++++++++++++++
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/prog_guide/dmadev.rst       |  125 ++++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    5 +
 drivers/dma/meson.build                |   11 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  594 ++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   73 +++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  557 +++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1035 ++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  178 ++++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 +++
 lib/dmadev/version.map                 |   35 ++
 lib/meson.build                        |    1 +
 24 files changed, 3566 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 1/8] dmadev: introduce DMA device library public APIs
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
@ 2021-08-28  7:29   ` Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 2/8] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:29 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs which expose generic
operations that can enable configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
---
 doc/api/doxy-api-index.md |   1 +
 doc/api/doxy-api.conf.in  |   1 +
 lib/dmadev/meson.build    |   4 +
 lib/dmadev/rte_dmadev.h   | 939 ++++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map    |  24 ++
 lib/meson.build           |   1 +
 6 files changed, 970 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..a44a92b 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..6d5bd85
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..e523ebd
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,939 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, each of which
+ * represents a different transfer context. A DMA operation request must be
+ * submitted to a virtual DMA channel. e.g. an application could create
+ * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+ * virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and
+ * released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
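+ * A minimal setup sketch following this order (illustrative, error
+ * handling omitted):
+ * \code{.c}
+ *	struct rte_dmadev_conf conf = { .nb_vchans = 1 };
+ *	struct rte_dmadev_vchan_conf vconf = {
+ *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *		.nb_desc = 1024,
+ *	};
+ *	rte_dmadev_configure(dev_id, &conf);
+ *	rte_dmadev_vchan_setup(dev_id, 0, &vconf);
+ *	rte_dmadev_start(dev_id);
+ * \endcode
+ *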
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to the virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to hardware; alternatively, the
+ * flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can
+ * do the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not invoke the above two completed APIs.
+ *
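+ * A minimal dataplane sketch combining submission and completion polling
+ * (illustrative; busy-waits for a single copy):
+ * \code{.c}
+ *	uint16_t last_idx;
+ *	bool has_error = false;
+ *	rte_dmadev_copy(dev_id, vchan, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);
+ *	while (rte_dmadev_completed(dev_id, vchan, 1, &last_idx,
+ *				    &has_error) == 0 && !has_error)
+ *		;
+ * \endcode
+ *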
+ * About the ring_idx which the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()) return, the rules are as follows:
+ *     - The ring_idx for each virtual DMA channel is independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring, as in the sketch
+ *       below.
+ *     - The initial ring_idx of a virtual DMA channel is zero, after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx returned is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx returned is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx returned is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx returned is 0
+ *     - ...
+ *
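+ * A sketch of per-operation metadata tracking with ring_idx (app_meta,
+ * op_ctx and RING_SZ are application-defined; RING_SZ is a power of two):
+ * \code{.c}
+ *	int idx = rte_dmadev_copy(dev_id, vchan, src, dst, len, 0);
+ *	if (idx >= 0)
+ *		app_meta[idx & (RING_SZ - 1)] = op_ctx;
+ * \endcode
+ *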
+ * The DMA operation address used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) is defined as the rte_iova_t type.
+ * The dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - true if the device index is valid, false otherwise.
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using VA as the DMA address.
+ * If the device supports SVA, then the application could pass any VA address,
+ * e.g. memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
+ * If the device doesn't support SVA, then the application should pass IOVA
+ * addresses obtained from rte_malloc() or rte_memzone().
+ *
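+ * A capability check sketch (illustrative):
+ * \code{.c}
+ *	struct rte_dmadev_info info;
+ *	rte_dmadev_info_get(dev_id, &info);
+ *	bool use_va = (info.dev_capa & RTE_DMADEV_CAPA_SVA) != 0;
+ * \endcode
+ *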
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, leaving a gap between the normal
+ * capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channel configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t nb_vchans;
+	/**< The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info, which is obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false-default mode, true-silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked before any other function in the API.
+ * This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it could initiate a DMA move request from memory
+	 * (which is SoC memory) to device (which is host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it could initiate a DMA move request from device
+	 * (which is host memory) to memory (which is SoC memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode; it could initiate a DMA move request from device
+	 * (which is host memory) to the device (which is another host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows how the SoC's PCIe module connects
+		 * to multiple PCIe hosts and multiple endpoints. The PCIe
+		 * module has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The pasid field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
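+ *
+ * A memory-to-device setup sketch (the PCIe port fields are
+ * device-specific hints and shown for illustration only):
+ * \code{.c}
+ *	struct rte_dmadev_vchan_conf conf = {
+ *		.direction = RTE_DMA_DIR_MEM_TO_DEV,
+ *		.nb_desc = 1024,
+ *		.dst_port = {
+ *			.port_type = RTE_DMADEV_PORT_PCIE,
+ *			.pcie = { .coreid = 0, .pfid = 1, },
+ *		},
+ *	};
+ * \endcode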
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dmadev_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed;
+	/**< Count of operations which were completed. */
+	uint64_t errors;
+	/**< Count of operations which failed to complete. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job); it allows outstanding requests to complete as much as
+	 * possible, reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it's possible for jobs from later batches to be
+	 * completed, though, so report the status of the not-attempted jobs
+	 * before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source address. */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; this covers the case where an address error is
+	 * known to have occurred, but not which address caused it.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device/
+	 * device-to-memory/device-to-device transfer scenarios.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in-order (i.e. it has a default fence between
+ * operations), this flag could be a NOP.
+ *
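+ * E.g. to guarantee ordering between two copies (sketch):
+ * \code{.c}
+ *	rte_dmadev_copy(dev_id, vchan, src1, dst1, len, 0);
+ *	rte_dmadev_copy(dev_id, vchan, src2, dst2, len,
+ *			RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode
+ *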
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means the operation with this flag must issue a doorbell to hardware
+ * after the jobs are enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write DMA data to the low level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so a driver should not return an error if this flag
+ * is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is triggered to begin this operation; otherwise the doorbell is
+ * not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
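+ *
+ * A two-source gather into one destination buffer, sketched with
+ * illustrative addresses and lengths:
+ * \code{.c}
+ *	struct rte_dmadev_sge src[2] = {
+ *		{ .addr = iova_a, .length = 64 },
+ *		{ .addr = iova_b, .length = 64 },
+ *	};
+ *	struct rte_dmadev_sge dst[1] = {
+ *		{ .addr = iova_c, .length = 128 },
+ *	};
+ *	rte_dmadev_copy_sg(dev_id, vchan, src, dst, 2, 1,
+ *			   RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode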
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; each
+ * operation's result may be success or failure.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's index.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (assuming n), then n values in the
+ *   status array are also set.
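+ *
+ * A drain sketch, e.g. after rte_dmadev_completed() reported an error
+ * (handle_failure() is an application-defined, hypothetical helper):
+ * \code{.c}
+ *	enum rte_dma_status_code st[32];
+ *	uint16_t i, last_idx;
+ *	uint16_t n = rte_dmadev_completed_status(dev_id, vchan, 32,
+ *						 &last_idx, st);
+ *	for (i = 0; i < n; i++)
+ *		if (st[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *			handle_failure(st[i]);
+ * \endcode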
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..2e37882
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,24 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..a542c23 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 2/8] dmadev: introduce DMA device library internal header
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
  2021-08-28  7:29   ` [dpdk-dev] [PATCH v17 1/8] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-08-28  7:30   ` Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 3/8] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library internal header, which contains
internal data types that are used by the DMA devices in order to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 176 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 177 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85..f421ec1 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..c44d88e
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+};
+
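+/* Illustrative sketch (not part of this patch): a driver would typically
+ * define a static const instance of this table and point its device's
+ * 'dev_ops' field at it:
+ *
+ *	static const struct rte_dmadev_ops my_dma_ops = {
+ *		.dev_info_get  = my_info_get,
+ *		.dev_configure = my_configure,
+ *		.vchan_setup   = my_vchan_setup,
+ *	};
+ *
+ * The 'my_*' callbacks are hypothetical names. Ops left NULL are either
+ * optional (e.g. dev_start/dev_stop/dev_dump) or reported as -ENOTSUP by
+ * the library wrappers.
+ */
+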
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from primary process, it is used by the secondary process to get
+	 * dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function
+ * pointers and driver data to be per-process, while the actual
+ * configuration data for the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD fast path depends mainly on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data', which is pointed to by the
+	 * 'data' field.
+	 *
+	 * - In the secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of
+	 * 'struct rte_dmadev_data', which was initialized by the primary
+	 * process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 3/8] dmadev: introduce DMA device library PMD header
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
  2021-08-28  7:29   ` [dpdk-dev] [PATCH v17 1/8] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 2/8] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-08-28  7:30   ` Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 4/8] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library PMD header, which provides
the driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 +++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1..833baf7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index e523ebd..1c20212 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -718,6 +718,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver-facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
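+/* Usage sketch (illustrative only, not part of this patch): a typical
+ * probe function allocates a slot and wires up its ops table:
+ *
+ *	struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ *	if (dev == NULL)
+ *		return -ENOMEM;
+ *	dev->dev_ops = &my_dma_ops;	// hypothetical ops table
+ *	dev->dev_private = my_priv;	// hypothetical driver state
+ */
+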
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 2e37882..d027eea 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -22,3 +22,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+        global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 4/8] dmadev: introduce DMA device library implementation
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 3/8] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-08-28  7:30   ` Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 5/8] doc: add DMA device library guide Chengwen Feng
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch introduces the DMA device library implementation, which includes
configuration of and I/O with DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 557 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 670 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7..d2fc85e 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..1c94640
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "" __VA_ARGS__)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL\n");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name\n");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated\n");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices\n");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data\n");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process\n",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans\n", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped\n", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing\n", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration\n",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= info.nb_vchans) {
+		RTE_DMADEV_LOG(ERR, "Device %u vchan out range!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer\n", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid\n", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid\n", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range\n", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail\n", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	fprintf(f, "  dev_capa: 0x%" PRIx64 "\n", info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 1c20212..e8f58e9 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -775,9 +775,21 @@ struct rte_dmadev_sge {
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -813,10 +825,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -848,9 +873,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -870,8 +907,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   0 on success. Otherwise negative value is returned.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -897,9 +946,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -929,10 +1006,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index c44d88e..e94aa1c 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -173,4 +173,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index d027eea..80be592 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -26,6 +26,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 5/8] doc: add DMA device library guide
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 4/8] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-08-28  7:30   ` Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 6/8] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++++++
 doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst      |   1 +
 3 files changed, 409 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..e47a164
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+====================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. It provides a generic
+DMA device framework which supports both physical (hardware) and virtual
+(software) DMA devices, as well as a generic DMA API which allows DMA
+devices to be managed and configured, and which supports provisioning of
+DMA operations on DMA poll mode drivers.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (aka. hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+   DMA channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. For example, an application
+   could create virtual DMA channel 0 for the memory-to-memory transfer
+   scenario and virtual DMA channel 1 for the memory-to-device transfer
+   scenario.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI BDF
+(bus/bridge, device, function). Specific physical DMA controllers, like
+other physical devices in DPDK, can be listed using the EAL command line
+options.
+
+The dmadevs are dynamically allocated using the ``rte_dmadev_pmd_allocate``
+API, based on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
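+
+As an example, a single-vchan configuration might look as follows (a minimal
+sketch, with error handling simplified):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = { .nb_vchans = 1 };
+
+   if (rte_dmadev_configure(dev_id, &dev_conf) < 0)
+       rte_exit(EXIT_FAILURE, "cannot configure dmadev %u\n", dev_id);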
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, the source device
+access port parameters and the destination device access port parameters.
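+
+For instance, a memory-to-memory virtual channel could be set up as follows
+(a minimal sketch):
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 1024,
+   };
+
+   if (rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf) < 0)
+       rte_exit(EXIT_FAILURE, "cannot set up vchan 0\n");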
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
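+
+For example, an application can test for a capability as follows (a sketch):
+
+.. code-block:: c
+
+   struct rte_dmadev_info info;
+
+   if (rte_dmadev_info_get(dev_id, &info) == 0 &&
+       (info.dev_capa & RTE_DMADEV_CAPA_SILENT))
+       printf("dmadev %u supports silent mode\n", dev_id);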
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue the doorbell to hardware.
+Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs ``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``, these are used to obtain the results of
+the enqueue requests. ``rte_dmadev_completed`` will return the number of
+successfully completed operations. ``rte_dmadev_completed_status`` will return
+the number of completed operations along with the status of each operation
+(filled into the ``status`` array passed by the user). These two APIs can
+also return the last completed operation's ``ring_idx``, which can help
+users track operations within their own application-defined rings.
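+
+A typical copy-and-poll cycle might look as follows (a sketch; ``src``,
+``dst`` and ``len`` are application-provided IOVA addresses and length):
+
+.. code-block:: c
+
+   uint16_t nb_done, last_idx;
+   bool has_error;
+   int ring_idx;
+
+   /* Enqueue a copy and ring the doorbell in the same call. */
+   ring_idx = rte_dmadev_copy(dev_id, 0, src, dst, len,
+                              RTE_DMA_OP_FLAG_SUBMIT);
+   if (ring_idx < 0)
+       rte_exit(EXIT_FAILURE, "enqueue failed\n");
+
+   /* Poll until the operation completes or an error is reported. */
+   do {
+       nb_done = rte_dmadev_completed(dev_id, 0, 1, &last_idx, &has_error);
+   } while (nb_done == 0 && !has_error);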
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000..157d7eb
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 6/8] dma/skeleton: introduce skeleton dmadev driver
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 5/8] doc: add DMA device library guide Chengwen Feng
@ 2021-08-28  7:30   ` Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 7/8] app/test: add dmadev API test Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 8/8] maintainers: add for dmadev Chengwen Feng
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

The skeleton dmadev driver, along the lines of the rawdev skeleton, is for
showcasing the dmadev library.

The design of the skeleton involves a virtual device which is plugged into
the vdev bus on initialization.

Also, enable compilation of the dmadev skeleton driver.
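
For instance, assuming the driver registers under the vdev name
"dma_skeleton", an instance can be created at application startup with an
EAL argument such as:

    <app> --vdev=dma_skeleton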

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 drivers/dma/meson.build                |  11 +
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 594 +++++++++++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  73 ++++
 drivers/dma/skeleton/version.map       |   3 +
 drivers/meson.build                    |   1 +
 6 files changed, 689 insertions(+)
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000..0c2c34c
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+if is_windows
+    subdir_done()
+endif
+
+drivers = [
+        'skeleton',
+]
+std_deps = ['dmadev']
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000..27509b1
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000..e97b537
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,594 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_ring.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+/* Count of instances */
+static uint16_t skeldma_init_once;
+
+static int
+skeldma_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	128
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM |
+			     RTE_DMADEV_CAPA_SVA |
+			     RTE_DMADEV_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dmadev *dev, const struct rte_dmadev_conf *conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dmadev *dev = (struct rte_dmadev *)param;
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			/* Reset on wrap-around so that the sleep branch
+			 * below keeps being taken on a long-idle device.
+			 */
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_ERR("Vchan was not setup, start fail!\n");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, including:
+	 * 1) flush the pending/running/completed rings to the empty ring.
+	 * 2) init the ring idx to zero.
+	 * 3) init the running statistics.
+	 * 4) mark the cpucopy task exit_flag as false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_ERR("Start cpucopy thread fail!\n");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_WARN("Set thread affinity lcore = %u fail!\n",
+				     hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skelteon_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_ERR("Malloc dma skeleton desc fail!\n");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_ERR("Create dma skeleton desc ring fail!\n");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a full ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dmadev *dev)
+{
+	/* The device is already stopped */
+	vchan_release(dev->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dmadev *dev, uint16_t vchan,
+		    const struct rte_dmadev_vchan_conf *conf)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_ERR("Number of desc must be power of 2!\n");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dmadev *dev, uint16_t vchan,
+		  struct rte_dmadev_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dmadev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->dev_private;
+
+	fprintf(f,
+		"  lcore_id: %d\n"
+		"  socket_id: %d\n"
+		"  desc_empty_ring_count: %u\n"
+		"  desc_pending_ring_count: %u\n"
+		"  desc_running_ring_count: %u\n"
+		"  desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	fprintf(f,
+		"  next_ring_idx: %u\n"
+		"  submitted_count: %" PRIu64 "\n"
+		"  completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
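+/*
+ * Drain all descriptors from the pending ring into the running ring,
+ * optionally appending one freshly prepared descriptor; this models
+ * ringing a hardware doorbell.
+ */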
+static void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(struct rte_dmadev *dev, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	/* Return the ring_idx assigned to this request. */
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(struct rte_dmadev *dev,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(struct rte_dmadev *dev,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dmadev_ops skeldma_ops = {
+	.dev_info_get = skeldma_info_get,
+	.dev_configure = skeldma_configure,
+	.dev_start = skeldma_start,
+	.dev_stop = skeldma_stop,
+	.dev_close = skeldma_close,
+
+	.vchan_setup = skeldma_vchan_setup,
+
+	.stats_get = skeldma_stats_get,
+	.stats_reset = skeldma_stats_reset,
+
+	.dev_dump = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dmadev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	dev = rte_dmadev_pmd_allocate(name);
+	if (dev == NULL) {
+		SKELDMA_ERR("Unable to allocate dmadev: %s\n", name);
+		return -EINVAL;
+	}
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev->dev_private = rte_zmalloc_socket("dmadev private",
+					      sizeof(struct skeldma_hw),
+					      RTE_CACHE_LINE_SIZE,
+					      socket_id);
+	if (!dev->dev_private) {
+		SKELDMA_ERR("Unable to allocate device private memory\n");
+		(void)rte_dmadev_pmd_release(dev);
+		return -ENOMEM;
+	}
+
+	dev->copy = skeldma_copy;
+	dev->submit = skeldma_submit;
+	dev->completed = skeldma_completed;
+	dev->completed_status = skeldma_completed_status;
+	dev->dev_ops = &skeldma_ops;
+	dev->device = &vdev->device;
+
+	hw = dev->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	dev = rte_dmadev_get_device_by_name(name);
+	if (!dev)
+		return -EINVAL;
+
+	ret = rte_dmadev_close(dev->data->dev_id);
+	if (ret)
+		return ret;
+
+	rte_free(dev->dev_private);
+	dev->dev_private = NULL;
+	(void)rte_dmadev_pmd_release(dev);
+
+	return 0;
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_INFO("Parse lcore_id = %d\n", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_ERR("Multiple process not supported for %s\n", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_init_once) {
+		SKELDMA_ERR("Multiple instance not supported for %s\n", name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_INFO("Create %s dmadev lcore-id %d\n", name, lcore_id);
+		/* Device instance created; Second instance not possible */
+		skeldma_init_once = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_init_once = 0;
+		SKELDMA_INFO("Remove %s dmadev\n", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
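+
+/* Usage sketch (illustrative only): instantiate the driver and pin its
+ * copy thread to lcore 3 via the EAL vdev argument:
+ *     --vdev=dma_skeleton,lcore=3
+ */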
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000..8cdc2bb
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef __SKELETON_DMADEV_H__
+#define __SKELETON_DMADEV_H__
+
+#include <rte_dmadev.h>
+
+extern int skeldma_logtype;
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "", \
+		__func__, ##args)
+
+#define SKELDMA_DEBUG(fmt, args...) \
+	SKELDMA_LOG(DEBUG, fmt, ## args)
+#define SKELDMA_INFO(fmt, args...) \
+	SKELDMA_LOG(INFO, fmt, ## args)
+#define SKELDMA_WARN(fmt, args...) \
+	SKELDMA_LOG(WARNING, fmt, ## args)
+#define SKELDMA_ERR(fmt, args...) \
+	SKELDMA_LOG(ERR, fmt, ## args)
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
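+	 *
+	 * In code terms: skeldma_copy() moves a descriptor from empty to
+	 * pending (or directly to running with RTE_DMA_OP_FLAG_SUBMIT),
+	 * submit() drains pending into running, cpucopy_thread() moves
+	 * running to completed, and skeldma_completed*() recycle completed
+	 * descriptors back to empty.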
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* __SKELETON_DMADEV_H__ */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000..c2e0723
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
diff --git a/drivers/meson.build b/drivers/meson.build
index bc6f4f5..383f648 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 7/8] app/test: add dmadev API test
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 6/8] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-08-28  7:30   ` Chengwen Feng
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 8/8] maintainers: add for dmadev Chengwen Feng
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the
test framework.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  45 ++++
 app/test/test_dmadev_api.c | 517 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 566 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c

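For reference, once built, the suite can be launched from the dpdk-test
binary's interactive prompt, roughly as follows (a sketch; the binary path
depends on the local build directory):

    $ ./build/app/test/dpdk-test
    RTE>> dmadev_autotest

The test creates and destroys the 'dma_skeleton' vdev itself, so no extra
--vdev argument is needed.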
diff --git a/app/test/meson.build b/app/test/meson.build
index a761168..9027eba 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -43,6 +43,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -162,6 +164,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -333,6 +336,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000..bb01e86
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+
+/* from test_dmadev_api.c */
+extern int test_dmadev_api(uint16_t dev_id);
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dmadev_get_dev_id(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dmadev_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dmadev(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dmadev);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000..aefd4aa
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dmadev_api(uint16_t dev_id);
+
+#define SKELDMA_TEST_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static uint16_t test_dev_id;
+static uint16_t invalid_dev_id;
+
+static int total;
+static int passed;
+static int failed;
+static char *src;
+static char *dst;
+
+static int
+testsuite_setup(uint16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL)
+		return -ENOMEM;
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	rte_free(dst);
+	/* Ensure the dmadev is stopped. */
+	rte_dmadev_stop(test_dev_id);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dmadev_get_dev_id(void)
+{
+	int ret = rte_dmadev_get_dev_id("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dmadev_is_valid_dev(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dmadev_is_valid_dev(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_count(void)
+{
+	uint16_t count = rte_dmadev_count();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_info_get(void)
+{
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_configure(void)
+{
+	struct rte_dmadev_conf conf = { 0 };
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configure nb_vchans not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_vchan_setup(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_vchan_setup(test_dev_id, dev_conf.nb_vchans,
+				     &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.src_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_start_stop(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup when device started */
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret);
+
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_stats(void)
+{
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dmadev_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, RTE_DMADEV_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret, i;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check enqueue without submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+	/* Check add submit */
+	ret = rte_dmadev_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Stop the dmadev to make sure it is in a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Check completed status again (no new completions expected) */
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to completed status");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Stop the dmadev to make sure it is in a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dmadev_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup fail!\n");
+		return -1;
+	}
+
+	/* If a testcase exits successfully, it must ensure that the test
+	 * dmadev still exists and is in the stopped state.
+	 */
+	SKELDMA_TEST_RUN(test_dmadev_get_dev_id);
+	SKELDMA_TEST_RUN(test_dmadev_is_valid_dev);
+	SKELDMA_TEST_RUN(test_dmadev_count);
+	SKELDMA_TEST_RUN(test_dmadev_info_get);
+	SKELDMA_TEST_RUN(test_dmadev_configure);
+	SKELDMA_TEST_RUN(test_dmadev_vchan_setup);
+	SKELDMA_TEST_RUN(test_dmadev_start_stop);
+	SKELDMA_TEST_RUN(test_dmadev_stats);
+	SKELDMA_TEST_RUN(test_dmadev_completed);
+	SKELDMA_TEST_RUN(test_dmadev_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v17 8/8] maintainers: add for dmadev
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
                     ` (6 preceding siblings ...)
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 7/8] app/test: add dmadev API test Chengwen Feng
@ 2021-08-28  7:30   ` Chengwen Feng
  2021-08-28  8:25     ` fengchengwen
  7 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-08-28  7:30 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

This patch adds myself as dmadev's maintainer and updates the release notes.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                            | 7 +++++++
 doc/guides/rel_notes/release_21_11.rst | 5 +++++
 2 files changed, 12 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 266f5ac..c057a09 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,13 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
+F: doc/guides/prog_guide/dmadev.rst
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d707a55..78b9691 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -55,6 +55,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provision of hardware and software DMA devices.
+
 
 Removed Items
 -------------
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v17 8/8] maintainers: add for dmadev
  2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 8/8] maintainers: add for dmadev Chengwen Feng
@ 2021-08-28  8:25     ` fengchengwen
  2021-08-30  8:19       ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-08-28  8:25 UTC (permalink / raw)
  To: dev; +Cc: Thomas Monjalon

I got a checkpatch warning email for this patch, but it contains no valid information:

	Test-Label: checkpatch
	Test-Status: WARNING
	http://dpdk.org/patch/97493

	_coding style issues_


	--- a/doc/guides/rel_notes/release_21_11.rst
	+++ b/doc/guides/rel_notes/release_21_11.rst
	.

Could someone take a look at this?

Thanks.

On 2021/8/28 15:30, Chengwen Feng wrote:
> This patch adds myself as dmadev's maintainer and updates the release notes.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
>  MAINTAINERS                            | 7 +++++++
>  doc/guides/rel_notes/release_21_11.rst | 5 +++++
>  2 files changed, 12 insertions(+)
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 266f5ac..c057a09 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -496,6 +496,13 @@ F: drivers/raw/skeleton/
>  F: app/test/test_rawdev.c
>  F: doc/guides/prog_guide/rawdev.rst
>  
> +DMA device API - EXPERIMENTAL
> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/
> +F: drivers/dma/skeleton/
> +F: app/test/test_dmadev*
> +F: doc/guides/prog_guide/dmadev.rst
> +
>  
>  Memory Pool Drivers
>  -------------------
> diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
> index d707a55..78b9691 100644
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -55,6 +55,11 @@ New Features
>       Also, make sure to start the actual text at the margin.
>       =======================================================
>  
> +* **Added dmadev library support.**
> +
> +  The dmadev library provides a DMA device framework for management and
> +  provision of hardware and software DMA devices.
> +
>  
>  Removed Items
>  -------------
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v17 8/8] maintainers: add for dmadev
  2021-08-28  8:25     ` fengchengwen
@ 2021-08-30  8:19       ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-08-30  8:19 UTC (permalink / raw)
  To: fengchengwen; +Cc: dev, Thomas Monjalon

On Sat, Aug 28, 2021 at 04:25:33PM +0800, fengchengwen wrote:
> I got a checkpatch warning email for this patch, but it contains no valid information:
> 
> 	Test-Label: checkpatch
> 	Test-Status: WARNING
> 	http://dpdk.org/patch/97493
> 
> 	_coding style issues_
> 
> 
> 	--- a/doc/guides/rel_notes/release_21_11.rst
> 	+++ b/doc/guides/rel_notes/release_21_11.rst
> 	.
> 
> Could someone take a look at this?
> 
Patch looks ok to me, so I think you can safely ignore this as a false positive.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 0/8] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (20 preceding siblings ...)
  2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
@ 2021-09-02 10:54 ` Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 1/8] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (7 more replies)
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
                   ` (7 subsequent siblings)
  29 siblings, 8 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch set contains eight patches adding the new dmadev library.

Chengwen Feng (8):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test
  maintainers: add for dmadev

---
v18:
* RTE_DMA_STATUS_* add BUS_READ/WRITE_ERR, PAGE_FAULT.
* rte_dmadev dataplane APIs check dev_started when debug is enabled.
* rte_dmadev_start/vchan_setup check that the device is configured.
* rte_dmadev_dump supports formatting capability names.
* optimized the comments of rte_dmadev.
* fix skeldma_copy always returning zero on successful enqueue.
* log encapsulation macros add newline characters.
* test_dmadev_api adds a rte_dmadev_dump() unit test.
v17:
* remove rte_dmadev_selftest() API.
* move dmadev API test from dma/skeleton to app/test.
* fix compile error of dma/skeleton driver when building for x86-32.
* fix iol spell check warning of dmadev.rst.
v16:
* redefine struct rte_dmadev_stats with fields:
  submitted, completed, errors.
* add dma skeleton.
* add dmadev ut.
v15:
* fix typo and readability of prog_guide.
* fix some public APIs' return value comments inconsistent with the impl.
* add return -ENOSPC comment if enqueue fail due to no space.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   45 ++
 app/test/test_dmadev_api.c             |  532 ++++++++++++++++
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/prog_guide/dmadev.rst       |  125 ++++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    5 +
 drivers/dma/meson.build                |   11 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  601 ++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   59 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  614 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1045 ++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  178 ++++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 +++
 lib/dmadev/version.map                 |   35 ++
 lib/meson.build                        |    1 +
 24 files changed, 3641 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 1/8] dmadev: introduce DMA device library public APIs
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
@ 2021-09-02 10:54   ` Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 2/8] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs, which expose generic
operations enabling configuration of and I/O with DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
---
 doc/api/doxy-api-index.md |   1 +
 doc/api/doxy-api.conf.in  |   1 +
 lib/dmadev/meson.build    |   4 +
 lib/dmadev/rte_dmadev.h   | 949 ++++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map    |  24 ++
 lib/meson.build           |   1 +
 6 files changed, 980 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..a44a92b 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..6d5bd85
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..40ae3b1
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,949 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * A DMA controller may have multiple HW-DMA-channels (aka HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * A dmadev can create multiple virtual DMA channels; each virtual DMA
+ * channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. E.g. an application could
+ * create virtual DMA channel 0 for the memory-to-memory transfer scenario
+ * and virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively,
+ * the flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs
+ * can do the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note The two completed APIs also support returning the last completed
+ * operation's ring_idx.
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not need to invoke the above two completed APIs.
+ *
+ * About the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows:
+ *     - The ring_idx of each virtual DMA channel is independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx return is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx return is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
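+ *
+ * As a sketch of how an application may exploit the ring_idx, per-operation
+ * metadata can be kept in a 65536-entry application-defined array indexed by
+ * ring_idx ('ctx' and 'my_cookie' here are hypothetical):
+ *
+ *     ret = rte_dmadev_copy(dev_id, vchan, src, dst, len, flags);
+ *     if (ret >= 0)
+ *             ctx[(uint16_t)ret].cookie = my_cookie;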
+ *
+ * The DMA operation address used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) is defined as the rte_iova_t type.
+ * The dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions, which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
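+ * A minimal usage sketch (error handling omitted; dev_id, src, dst and len
+ * are assumed to be prepared by the application; nb_desc of 128 is only an
+ * example value):
+ *
+ *     struct rte_dmadev_conf conf = { .nb_vchans = 1 };
+ *     struct rte_dmadev_vchan_conf vconf = {
+ *             .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *             .nb_desc = 128,
+ *     };
+ *     uint16_t last_idx;
+ *     bool has_error;
+ *
+ *     rte_dmadev_configure(dev_id, &conf);
+ *     rte_dmadev_vchan_setup(dev_id, 0, &vconf);
+ *     rte_dmadev_start(dev_id);
+ *     rte_dmadev_copy(dev_id, 0, src, dst, len, RTE_DMA_OP_FLAG_SUBMIT);
+ *     while (rte_dmadev_completed(dev_id, 0, 1, &last_idx, &has_error) == 0)
+ *             ; /* poll until the copy completes */
+ *     rte_dmadev_stop(dev_id);
+ *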
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using a VA as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
+ * If the device does not support SVA, the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, leaving a gap between the normal
+ * capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
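+
+/* Applications are expected to test these bits before using a given op type,
+ * e.g. (a sketch; 'info' is filled in by rte_dmadev_info_get()):
+ *
+ *     struct rte_dmadev_info info;
+ *     rte_dmadev_info_get(dev_id, &info);
+ *     if (!(info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY_SG))
+ *             return -ENOTSUP; * scatter-gather copies unsupported *
+ */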
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
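+/* A minimal capability-query sketch (illustrative only; assumes device 0
+ * has already been probed):
+ *
+ *	struct rte_dmadev_info info;
+ *	if (rte_dmadev_info_get(0, &info) == 0 &&
+ *	    (info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY))
+ *		printf("dev 0: copy supported, max_vchans=%u\n",
+ *		       info.max_vchans);
+ */
+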
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t nb_vchans;
+	/**< The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode
+	 * (false: default mode, true: silent mode).
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked before any other function in the API.
+ * It can also be re-invoked when a device is in the stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure, encapsulated in an
+ *   rte_dmadev_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to a device (the host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from a
+	 * device (the host's memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (a host's memory) to another device (another host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows how the SoC's PCIe module connects
+		 * to multiple PCIe hosts and endpoints. The PCIe module has an
+		 * integrated DMA controller.
+		 *
+		 * If the DMA controller needs to access the memory of host A,
+		 * the access can be initiated by PF-1 in Core0, or by VF-0 of
+		 * PF-0 in Core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameters in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameters
+	 * in the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
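+/* An illustrative device-to-memory channel configuration (a sketch only;
+ * the PCIe port field values are placeholders):
+ *
+ *	struct rte_dmadev_vchan_conf conf = {
+ *		.direction = RTE_DMA_DIR_DEV_TO_MEM,
+ *		.nb_desc = 1024,
+ *		.src_port = {
+ *			.port_type = RTE_DMADEV_PORT_PCIE,
+ *			.pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *		},
+ *	};
+ */
+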
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel. The value must be in the
+ *   range [0, nb_vchans - 1], where nb_vchans was previously supplied to
+ *   rte_dmadev_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure, encapsulated in an
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf);
+
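+/* A typical bring-up sequence (an illustrative sketch; error handling is
+ * abbreviated and 'dev_id' is assumed to identify a probed device):
+ *
+ *	struct rte_dmadev_conf dev_conf = { .nb_vchans = 1 };
+ *	struct rte_dmadev_vchan_conf vchan_conf = {
+ *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *		.nb_desc = 1024,
+ *	};
+ *	if (rte_dmadev_configure(dev_id, &dev_conf) != 0 ||
+ *	    rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf) != 0 ||
+ *	    rte_dmadev_start(dev_id) != 0)
+ *		rte_exit(EXIT_FAILURE, "dmadev %u setup failed\n", dev_id);
+ */
+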
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed;
+	/**< Count of operations which were completed. */
+	uint64_t errors;
+	/**< Count of operations which failed to complete. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, statistics for all channels are
+ *   returned.
+ * @param[out] stats
+ *   The basic statistics structure, encapsulated in an rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, statistics for all channels are reset.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
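+/* An illustrative statistics read across all channels (sketch only):
+ *
+ *	struct rte_dmadev_stats stats;
+ *	if (rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats) == 0)
+ *		printf("submitted=%" PRIu64 " completed=%" PRIu64
+ *		       " errors=%" PRIu64 "\n",
+ *		       stats.submitted, stats.completed, stats.errors);
+ */
+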
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * the job), allowing outstanding requests to complete as much as
+	 * possible and so reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation was not attempted: the jobs in a particular batch
+	 * were not attempted because they appeared after a fence where a
+	 * previous job failed. In some HW implementations it is possible for
+	 * jobs from later batches to complete anyway, so the status of the
+	 * not-attempted jobs is reported before that of those newer completed
+	 * jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to invalid source address. */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; covers the case where an address error is
+	 * known, but it is unclear which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/**< The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/**< The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error; covers the
+	 * case where a bus error is known, but not in which direction it
+	 * occurred.
+	 */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device/device-to-memory/
+	 * device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/**< The operation failed to complete due to a page fault on lookup. */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - describes a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * An operation with this flag must be processed only after all previous
+ * operations have completed.
+ * If the given DMA HW works in-order (i.e. it has an implicit fence between
+ * operations), this flag may be a no-op.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * An operation with this flag will ring the doorbell to the hardware after
+ * the job is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write DMA data to the low-level cache.
+ * Used for performance optimization. This is only a hint, and there is no
+ * capability bit for it, so a driver must not return an error if this flag
+ * is set.
+ */
+
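+/* The flags may be OR-ed together; for example, a fenced and immediately
+ * submitted copy could look like the following sketch ('src', 'dst' and
+ * 'len' are assumed):
+ *
+ *	rte_dmadev_copy(dev_id, 0, src, dst, len,
+ *			RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);
+ */
+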
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is rung to begin
+ * this operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is rung to begin this operation; otherwise it is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
+
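+/* An illustrative two-entry gather into a single destination (sketch only;
+ * the IOVA values are assumed to be valid for the device):
+ *
+ *	struct rte_dmadev_sge src[2] = {
+ *		{ .addr = src_iova0, .length = 64 },
+ *		{ .addr = src_iova1, .length = 64 },
+ *	};
+ *	struct rte_dmadev_sge dst[1] = {
+ *		{ .addr = dst_iova, .length = 128 },
+ *	};
+ *	rte_dmadev_copy_sg(dev_id, 0, src, dst, 2, 1,
+ *			   RTE_DMA_OP_FLAG_SUBMIT);
+ */
+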
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is rung to begin
+ * this operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+
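+/* An illustrative batched enqueue: several copies are queued without the
+ * SUBMIT flag, then the doorbell is rung once (a sketch; the 'srcs', 'dsts'
+ * and 'lens' arrays are assumed):
+ *
+ *	for (i = 0; i < n; i++)
+ *		rte_dmadev_copy(dev_id, 0, srcs[i], dsts[i], lens[i], 0);
+ *	rte_dmadev_submit(dev_id, 0);
+ */
+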
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have completed; each operation's
+ * result may be success or failure.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of the virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of the status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number n is greater than zero, then the first n values in the
+ *   status array are also set.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
+
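+/* A typical completion-polling sketch (illustrative only; on error, the
+ * per-operation status codes are fetched with the status variant):
+ *
+ *	uint16_t last_idx;
+ *	bool has_error = false;
+ *	uint16_t n = rte_dmadev_completed(dev_id, 0, 32, &last_idx,
+ *					  &has_error);
+ *	if (has_error) {
+ *		enum rte_dma_status_code status[32];
+ *		n = rte_dmadev_completed_status(dev_id, 0, 32, &last_idx,
+ *						status);
+ *		// status[0..n-1] now holds a code for each operation
+ *	}
+ */
+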
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..2e37882
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,24 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..a542c23 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 2/8] dmadev: introduce DMA device library internal header
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 1/8] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-09-02 10:54   ` Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 3/8] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (5 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch introduces the DMA device library internal header, which
contains internal data types used by the DMA devices in order to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 176 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 177 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85..f421ec1 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..c44d88e
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get the device information. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+};
+
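+/* An illustrative PMD ops table (sketch only; the my_* callbacks are
+ * hypothetical driver functions matching the typedefs above):
+ *
+ *	static const struct rte_dmadev_ops my_dmadev_ops = {
+ *		.dev_info_get = my_info_get,
+ *		.dev_configure = my_configure,
+ *		.dev_start = my_start,
+ *		.dev_stop = my_stop,
+ *		.dev_close = my_close,
+ *		.vchan_setup = my_vchan_setup,
+ *	};
+ */
+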
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from primary process, it is used by the secondary process to get
+	 * dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data' pointed to by the 'data' field.
+	 *
+	 * - In the secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of
+	 * 'struct rte_dmadev_data', which was initialized by the primary
+	 * process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 3/8] dmadev: introduce DMA device library PMD header
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 1/8] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 2/8] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-09-02 10:54   ` Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 4/8] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch introduces the DMA device library PMD header, which contains
the driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 +++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1..833baf7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 40ae3b1..fad8e00 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -728,6 +728,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
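+/* An illustrative probe-time usage from a PMD (sketch only; 'my_dmadev_ops'
+ * and 'priv' are hypothetical driver objects):
+ *
+ *	struct rte_dmadev *dev = rte_dmadev_pmd_allocate("my_dma0");
+ *	if (dev == NULL)
+ *		return -ENODEV;
+ *	dev->dev_ops = &my_dmadev_ops;
+ *	dev->dev_private = priv;
+ *	dev->data->dev_private = priv;
+ */
+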
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 2e37882..d027eea 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -22,3 +22,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+        global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 4/8] dmadev: introduce DMA device library implementation
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 3/8] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-09-02 10:54   ` Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 5/8] doc: add DMA device library guide Chengwen Feng
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch introduces the DMA device library implementation, which
includes configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 614 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 727 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7..d2fc85e 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..877eead
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate shared memory for device data. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+			dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+			dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= info.nb_vchans) {
+		RTE_DMADEV_LOG(ERR, "Device %u vchan out range!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dmadev_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMADEV_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMADEV_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMADEV_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMADEV_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMADEV_CAPA_SVA,         "sva"     },
+		{ RTE_DMADEV_CAPA_SILENT,      "silent"  },
+		{ RTE_DMADEV_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMADEV_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMADEV_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dmadev_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		fprintf(f, " %s", dmadev_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	fprintf(f, "\n");
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	dmadev_dump_capability(f, info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index fad8e00..e438406 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -785,9 +785,21 @@ struct rte_dmadev_sge {
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -823,10 +835,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -858,9 +883,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -880,8 +917,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   0 on success. Otherwise negative value is returned.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -907,9 +956,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile-time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as a parameter, then the compiler
+	 *   knows the value is NULL.
+	 * - If the address of a local variable is passed as a parameter, then
+	 *   the compiler can know it is non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -939,10 +1016,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index c44d88e..e94aa1c 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -173,4 +173,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index d027eea..80be592 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -26,6 +26,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 5/8] doc: add DMA device library guide
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 4/8] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-09-02 10:54   ` Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 6/8] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
                     ` (2 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch adds the dmadev library programmer's guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++++++
 doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst      |   1 +
 3 files changed, 409 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..e47a164
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. It provides a generic DMA
+device framework which supports both physical (hardware) and virtual
+(software) DMA devices, as well as a generic DMA API which allows DMA devices
+to be managed and configured, and which supports DMA operations to be
+provisioned on each DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller may have multiple hardware DMA channels (a.k.a. hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * The dmadev may create multiple virtual DMA channels, where each virtual DMA
+   channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. For example, an application
+   could create virtual DMA channel 0 for memory-to-memory transfers and
+   virtual DMA channel 1 for memory-to-device transfers.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI BDF (bus,
+device, function). Specific physical DMA controllers, like other physical
+devices in DPDK, can be listed using the EAL command line options.
+
+The dmadevs are dynamically allocated using the ``rte_dmadev_pmd_allocate``
+API, based on the number of hardware DMA channels.
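+
+As an illustration, a driver probe path might allocate a dmadev as sketched
+below (see the skeleton driver later in this series for a complete example):
+
+.. code-block:: c
+
+   /* sketch: allocate one dmadev per hardware DMA channel */
+   struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+   if (dev == NULL)
+       return -EINVAL;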
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
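+
+For example, the device index of a named device can be retrieved as follows
+(``rte_dmadev_get_dev_id`` returns a negative value if the name is unknown):
+
+.. code-block:: c
+
+   int dev_id = rte_dmadev_get_dev_id("dma_skeleton");
+   if (dev_id < 0) {
+       /* no device with this name */
+   }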
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
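+
+As a minimal sketch (assuming ``dev_id`` identifies a valid dmadev), a device
+could be configured with a single virtual DMA channel as follows:
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = { 0 };
+
+   dev_conf.nb_vchans = 1; /* one virtual DMA channel */
+   if (rte_dmadev_configure(dev_id, &dev_conf) < 0) {
+       /* handle configuration error */
+   }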
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, and the source and
+destination device access port parameters.
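+
+For example, virtual DMA channel 0 could be set up for memory-to-memory
+transfers as sketched below (the descriptor count is an arbitrary example
+value which must lie between the device's ``min_desc`` and ``max_desc``):
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+
+   vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+   vchan_conf.nb_desc = 512; /* example value */
+   if (rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf) < 0) {
+       /* handle setup error */
+   }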
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per operation
+metadata in an application-defined circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue a doorbell to the hardware.
+Alternatively, the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the
+enqueue APIs to also issue the doorbell to the hardware.
+
+There are two dequeue APIs, ``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``, which are used to obtain the results of the
+enqueued requests. ``rte_dmadev_completed`` returns the number of successfully
+completed operations. ``rte_dmadev_completed_status`` returns the number of
+completed operations along with the status of each operation (filled into the
+``status`` array passed by the user). These two APIs also return the last
+completed operation's ``ring_idx``, which helps users track operations within
+their own application-defined rings.
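+
+The following sketch shows a typical flow on virtual DMA channel 0 of a
+started device, assuming ``src_iova``, ``dst_iova`` and ``length`` describe a
+previously prepared copy:
+
+.. code-block:: c
+
+   uint16_t last_idx;
+   bool has_error;
+   int ring_idx;
+
+   /* enqueue the copy and ring the doorbell in a single call */
+   ring_idx = rte_dmadev_copy(dev_id, 0, src_iova, dst_iova, length,
+                              RTE_DMA_OP_FLAG_SUBMIT);
+   if (ring_idx < 0) {
+       /* handle enqueue error */
+   }
+
+   /* later: poll for up to one completion */
+   if (rte_dmadev_completed(dev_id, 0, 1, &last_idx, &has_error) == 1) {
+       /* operation with index last_idx has completed */
+   }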
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000..157d7eb
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 6/8] dma/skeleton: introduce skeleton dmadev driver
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 5/8] doc: add DMA device library guide Chengwen Feng
@ 2021-09-02 10:54   ` Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 7/8] app/test: add dmadev API test Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev Chengwen Feng
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch adds a skeleton dmadev driver, along the lines of the rawdev
skeleton, to showcase the dmadev library.

The skeleton is designed as a virtual device which is plugged into the
vdev bus on initialization.

This patch also enables compilation of the dmadev skeleton driver.
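
The driver can be instantiated from the EAL command line, with an optional
'lcore' argument selecting the core for the cpucopy worker thread, e.g.:

    --vdev=dma_skeleton,lcore=1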

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 drivers/dma/meson.build                |  11 +
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 601 +++++++++++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  59 ++++
 drivers/dma/skeleton/version.map       |   3 +
 drivers/meson.build                    |   1 +
 6 files changed, 682 insertions(+)
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000..0c2c34c
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+if is_windows
+    subdir_done()
+endif
+
+drivers = [
+        'skeleton',
+]
+std_deps = ['dmadev']
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000..27509b1
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000..7033062
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_ring.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances */
+static uint16_t skeldma_init_once;
+
+static int
+skeldma_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	128
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM |
+			     RTE_DMADEV_CAPA_SVA |
+			     RTE_DMADEV_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dmadev *dev, const struct rte_dmadev_conf *conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dmadev *dev = (struct rte_dmadev *)param;
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			/* Guard against wrap-around so that the sleep keeps
+			 * triggering while the ring stays idle.
+			 */
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, include:
+	 * 1) fflush pending/running/completed ring to empty ring.
+	 * 2) init ring idx to zero.
+	 * 3) init running statistics.
+	 * 4) mark cpucopy task exit_flag to false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Set thread affinity lcore = %u fail!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skeleton_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a full ring from an empty one.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dmadev *dev)
+{
+	/* The device has already been stopped */
+	vchan_release(dev->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dmadev *dev, uint16_t vchan,
+		    const struct rte_dmadev_vchan_conf *conf)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of desc must be power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dmadev *dev, uint16_t vchan,
+		  struct rte_dmadev_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dmadev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->dev_private;
+
+	fprintf(f,
+		"  lcore_id: %d\n"
+		"  socket_id: %d\n"
+		"  desc_empty_ring_count: %u\n"
+		"  desc_pending_ring_count: %u\n"
+		"  desc_running_ring_count: %u\n"
+		"  desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	fprintf(f,
+		"  next_ring_idx: %u\n"
+		"  submitted_count: %" PRIu64 "\n"
+		"  completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(struct rte_dmadev *dev, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(flags);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(struct rte_dmadev *dev,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(struct rte_dmadev *dev,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dmadev_ops skeldma_ops = {
+	.dev_info_get = skeldma_info_get,
+	.dev_configure = skeldma_configure,
+	.dev_start = skeldma_start,
+	.dev_stop = skeldma_stop,
+	.dev_close = skeldma_close,
+
+	.vchan_setup = skeldma_vchan_setup,
+
+	.stats_get = skeldma_stats_get,
+	.stats_reset = skeldma_stats_reset,
+
+	.dev_dump = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dmadev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	dev = rte_dmadev_pmd_allocate(name);
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev->dev_private = rte_zmalloc_socket("dmadev private",
+					      sizeof(struct skeldma_hw),
+					      RTE_CACHE_LINE_SIZE,
+					      socket_id);
+	if (!dev->dev_private) {
+		SKELDMA_LOG(ERR, "Unable to allocate device private memory");
+		(void)rte_dmadev_pmd_release(dev);
+		return -ENOMEM;
+	}
+
+	dev->copy = skeldma_copy;
+	dev->submit = skeldma_submit;
+	dev->completed = skeldma_completed;
+	dev->completed_status = skeldma_completed_status;
+	dev->dev_ops = &skeldma_ops;
+	dev->device = &vdev->device;
+
+	hw = dev->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	dev = rte_dmadev_get_device_by_name(name);
+	if (!dev)
+		return -EINVAL;
+
+	ret = rte_dmadev_close(dev->data->dev_id);
+	if (ret)
+		return ret;
+
+	rte_free(dev->dev_private);
+	dev->dev_private = NULL;
+	(void)rte_dmadev_pmd_release(dev);
+
+	return 0;
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multiple process not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_init_once) {
+		SKELDMA_LOG(ERR, "Multiple instance not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		/* Device instance created; a second instance is not possible */
+		skeldma_init_once = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_init_once = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000..46ff000
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef __SKELETON_DMADEV_H__
+#define __SKELETON_DMADEV_H__
+
+#include <rte_dmadev.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* __SKELETON_DMADEV_H__ */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000..c2e0723
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
diff --git a/drivers/meson.build b/drivers/meson.build
index d9e331e..a390787 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 7/8] app/test: add dmadev API test
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 6/8] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-09-02 10:54   ` Chengwen Feng
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev Chengwen Feng
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the test
framework.
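
For example, assuming a default meson build directory named 'build', the
test can be run interactively as:

    $ ./build/app/test/dpdk-test
    RTE>> dmadev_autotest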

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  45 ++++
 app/test/test_dmadev_api.c | 532 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 581 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c

diff --git a/app/test/meson.build b/app/test/meson.build
index a761168..9027eba 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -43,6 +43,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -162,6 +164,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -333,6 +336,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000..bb01e86
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+
+/* from test_dmadev_api.c */
+extern int test_dmadev_api(uint16_t dev_id);
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dmadev_get_dev_id(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dmadev_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dmadev(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dmadev);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000..a7795eb
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,532 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dmadev_api(uint16_t dev_id);
+
+#define SKELDMA_TEST_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static uint16_t test_dev_id;
+static uint16_t invalid_dev_id;
+
+static int total;
+static int passed;
+static int failed;
+static char *src;
+static char *dst;
+
+static int
+testsuite_setup(uint16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL)
+		return -ENOMEM;
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	rte_free(dst);
+	/* Ensure the dmadev is stopped. */
+	rte_dmadev_stop(test_dev_id);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dmadev_get_dev_id(void)
+{
+	int ret = rte_dmadev_get_dev_id("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dmadev_is_valid_dev(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dmadev_is_valid_dev(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_count(void)
+{
+	uint16_t count = rte_dmadev_count();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_info_get(void)
+{
+	struct rte_dmadev_info info =  { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_configure(void)
+{
+	struct rte_dmadev_conf conf = { 0 };
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configure nb_vchans not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_vchan_setup(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_vchan_setup(test_dev_id, dev_conf.nb_vchans,
+				     &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.src_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_start_stop(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup when device started */
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret);
+
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_stats(void)
+{
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dmadev_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, RTE_DMADEV_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+	ret = rte_dmadev_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret, i;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check enqueue without submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+	/* Check add submit */
+	ret = rte_dmadev_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Stop dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Check completed status again */
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed status");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Stop dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dmadev_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup fail!\n");
+		return -1;
+	}
+
+	/* Each testcase that exits successfully must ensure that the test
+	 * dmadev still exists and is left in the stopped state.
+	 */
+	SKELDMA_TEST_RUN(test_dmadev_get_dev_id);
+	SKELDMA_TEST_RUN(test_dmadev_is_valid_dev);
+	SKELDMA_TEST_RUN(test_dmadev_count);
+	SKELDMA_TEST_RUN(test_dmadev_info_get);
+	SKELDMA_TEST_RUN(test_dmadev_configure);
+	SKELDMA_TEST_RUN(test_dmadev_vchan_setup);
+	SKELDMA_TEST_RUN(test_dmadev_start_stop);
+	SKELDMA_TEST_RUN(test_dmadev_stats);
+	SKELDMA_TEST_RUN(test_dmadev_dump);
+	SKELDMA_TEST_RUN(test_dmadev_completed);
+	SKELDMA_TEST_RUN(test_dmadev_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
                     ` (6 preceding siblings ...)
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 7/8] app/test: add dmadev API test Chengwen Feng
@ 2021-09-02 10:54   ` Chengwen Feng
  2021-09-02 11:51     ` Bruce Richardson
  7 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 10:54 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch adds myself as the dmadev maintainer and updates the release notes.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                            | 7 +++++++
 doc/guides/rel_notes/release_21_11.rst | 5 +++++
 2 files changed, 12 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 7be9658..17763c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,13 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
+F: doc/guides/prog_guide/dmadev.rst
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d707a55..78b9691 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -55,6 +55,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provision of hardware and software DMA devices.
+
 
 Removed Items
 -------------
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev Chengwen Feng
@ 2021-09-02 11:51     ` Bruce Richardson
  2021-09-02 13:39       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-09-02 11:51 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On Thu, Sep 02, 2021 at 06:54:17PM +0800, Chengwen Feng wrote:
> This patch adds myself as the dmadev maintainer and updates the release notes.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---

Just in case you are doing any further revisions of this patchset: the
maintainers entry and the release-notes entry are generally added in the
first patch, so please squash this patch into patch #1.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v19 0/7] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (21 preceding siblings ...)
  2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
@ 2021-09-02 13:13 ` Chengwen Feng
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (6 more replies)
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
                   ` (6 subsequent siblings)
  29 siblings, 7 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 13:13 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch set contains seven patches adding the new dmadev library.

Chengwen Feng (7):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test

---
v19:
* squash the maintainers patch into patch #1.
v18:
* RTE_DMA_STATUS_* add BUS_READ/WRITE_ERR, PAGE_FAULT.
* rte_dmadev dataplane APIs check dev_started when debug is enabled.
* rte_dmadev_start/vchan_setup check that the device is configured.
* rte_dmadev_dump supports formatting capability names.
* improved the comments of rte_dmadev.
* fix skeldma_copy always returning zero on successful enqueue.
* log encapsulation macros add newline characters.
* test_dmadev_api adds a unit test for rte_dmadev_dump().
v17:
* remove rte_dmadev_selftest() API.
* move dmadev API test from dma/skeleton to app/test.
* fix compile error of dma/skeleton driver when building for x86-32.
* fix IOL spell-check warning in dmadev.rst.
v16:
* redefine struct rte_dmadev_stats with fields:
  submitted, completed, errors.
* add dma skeleton.
* add dmadev ut.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   45 ++
 app/test/test_dmadev_api.c             |  532 ++++++++++++++++
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/prog_guide/dmadev.rst       |  125 ++++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    5 +
 drivers/dma/meson.build                |   11 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  601 ++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   59 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  614 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1045 ++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  178 ++++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 +++
 lib/dmadev/version.map                 |   35 ++
 lib/meson.build                        |    1 +
 24 files changed, 3641 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
@ 2021-09-02 13:13   ` Chengwen Feng
  2021-09-03 11:42     ` Gagandeep Singh
                       ` (3 more replies)
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (5 subsequent siblings)
  6 siblings, 4 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 13:13 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs, which expose generic
operations that enable configuration of and I/O with the DMA devices.

The maintainers file update is also included in this patch.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
---
 MAINTAINERS                            |   4 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/rel_notes/release_21_11.rst |   5 +
 lib/dmadev/meson.build                 |   4 +
 lib/dmadev/rte_dmadev.h                | 949 +++++++++++++++++++++++++++++++++
 lib/dmadev/version.map                 |  24 +
 lib/meson.build                        |   1 +
 8 files changed, 989 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 7be9658..22dcd12 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107..ce08250 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a019..a44a92b 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d707a55..78b9691 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -55,6 +55,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provision of hardware and software DMA devices.
+
 
 Removed Items
 -------------
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000..6d5bd85
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000..40ae3b1
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,949 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * A DMA controller could have multiple HW-DMA-channels (a.k.a. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * A dmadev could create multiple virtual DMA channels, each of which
+ * represents a different transfer context. DMA operation requests must be
+ * submitted to a virtual DMA channel. E.g. an application could create
+ * virtual DMA channel 0 for memory-to-memory transfer scenarios and virtual
+ * DMA channel 1 for memory-to-device transfer scenarios.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
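+ *
+ * A minimal setup sketch (illustrative only; error handling is omitted and
+ * the descriptor count of 128 is an assumed value):
+ *
+ * \code{.c}
+ * struct rte_dmadev_conf dev_conf = { .nb_vchans = 1 };
+ * struct rte_dmadev_vchan_conf vchan_conf = {
+ *     .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *     .nb_desc = 128,
+ * };
+ *
+ * rte_dmadev_configure(dev_id, &dev_conf);
+ * rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf);
+ * rte_dmadev_start(dev_id);
+ * \endcode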
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; the flags
+ * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs can do the
+ * same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note The two completed APIs also support returning the last completed
+ * operation's ring_idx.
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not need to invoke the above two completed APIs.
+ *
+ * The ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()) obeys the following rules:
+ *     - The ring_idx of each virtual DMA channel is independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx return is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx return is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *
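+ * A minimal dataplane sketch, following on from the setup above (illustrative
+ * only; src_iova, dst_iova and len are placeholders, and the burst size of 32
+ * is an assumed value):
+ *
+ * \code{.c}
+ * uint16_t last_idx;
+ * bool has_error;
+ *
+ * // enqueue one copy and ring the doorbell in the same call
+ * rte_dmadev_copy(dev_id, 0, src_iova, dst_iova, len, RTE_DMA_OP_FLAG_SUBMIT);
+ *
+ * // later, poll for up to 32 completions on vchan 0
+ * uint16_t nb = rte_dmadev_completed(dev_id, 0, 32, &last_idx, &has_error);
+ * \endcode
+ *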
+ * The DMA operation addresses used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) are defined as the rte_iova_t type.
+ * The dmadev supports two types of addresses: memory addresses and device
+ * addresses.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores working on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using a VA as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
+ * If the device doesn't support SVA, the application must pass an IOVA address
+ * obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * This capability starts at bit index 32, so as to leave a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
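+/* Illustrative usage sketch (not part of the API): query the device info and
+ * check a capability bit before using optional ops such as scatter-gather
+ * copy.
+ *
+ *     struct rte_dmadev_info info;
+ *     if (rte_dmadev_info_get(dev_id, &info) == 0 &&
+ *         (info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY_SG))
+ *         ... rte_dmadev_copy_sg() may be used ...
+ */
+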
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t nb_vchans;
+	/**< The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false = default mode, true = silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one, and consists of setting the DMA
+ * device to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * rte_dma_direction - DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (which is SoC memory) to device (which is host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from device
+	 * (which is host memory) to memory (which is SoC memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (which is host memory) to another device (which is another
+	 * host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows the SoC's PCIe module connecting
+		 * to multiple PCIe hosts and multiple endpoints. The PCIe
+		 * module has an integrated DMA controller.
+		 *
+		 * If the DMA engine wants to access the memory of host A, the
+		 * access can be initiated by PF-1 in Core0, or by VF-0 of PF-0
+		 * in Core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameters in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameters in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
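+/* Illustrative sketch (not part of the API): a memory-to-device vchan whose
+ * destination is reached through PCIe; the coreid/pfid values and the
+ * descriptor count are assumptions for the example.
+ *
+ *     struct rte_dmadev_vchan_conf conf = {
+ *         .direction = RTE_DMA_DIR_MEM_TO_DEV,
+ *         .nb_desc = 128,
+ *         .dst_port = {
+ *             .port_type = RTE_DMADEV_PORT_PCIE,
+ *             .pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *         },
+ *     };
+ */
+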
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dmadev_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * rte_dmadev_stats - running statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed;
+	/**< Count of operations which were completed. */
+	uint64_t errors;
+	/**< Count of operations which failed to complete. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channel(s).
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMADEV_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
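+/* Illustrative usage sketch (not part of the API): fetch aggregate statistics
+ * across all vchans and then reset them; error handling omitted.
+ *
+ *     struct rte_dmadev_stats stats;
+ *     rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+ *     rte_dmadev_stats_reset(dev_id, RTE_DMADEV_ALL_VCHAN);
+ */
+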
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user can modify
+	 * the descriptors (e.g. change one bit to tell the hardware to abort
+	 * this job), which allows outstanding requests to complete as much as
+	 * possible and thus reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation was not attempted, in the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it's possible for jobs from later batches to be
+	 * completed, though, so the status of the not-attempted jobs is
+	 * reported before the status of those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; covers the case where only an address error is
+	 * known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * A DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/**< The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/**< The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error; covers the
+	 * case where only a bus error is known, but not its direction.
+	 */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read
+	 * error.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device/device-to-memory/
+	 * device-to-device transfer scenarios.
+	 */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/**< The operation failed to complete due to a page fault. */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * rte_dmadev_sge - holds a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * An operation with this flag must be processed only after all previous
+ * operations have completed.
+ * If the specified DMA HW works in order (i.e. it has an implicit fence
+ * between operations), this flag may be a no-op.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * An operation with this flag issues a doorbell to the hardware after the
+ * jobs are enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write DMA data to the low-level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so the driver should not return an error if this
+ * flag is set.
+ */
+
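+/* Illustrative sketch (not part of the API): enqueue two copies where the
+ * second must wait for the first, and ring the doorbell with the final call;
+ * s0/d0/s1/d1/len are placeholders.
+ *
+ *     rte_dmadev_copy(dev_id, 0, s0, d0, len, 0);
+ *     rte_dmadev_copy(dev_id, 0, s1, d1, len,
+ *                     RTE_DMA_OP_FLAG_FENCE | RTE_DMA_OP_FLAG_SUBMIT);
+ */
+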
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware, if the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
+ * this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware, if the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT then
+ * trigger doorbell to begin this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware, if the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
+ * this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   The flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there are transfer errors.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed, whether the
+ * operations succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (assuming n), then n values in the
+ *   status array are also set.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
+
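+/* Illustrative usage sketch (not part of the API): drain completions with
+ * per-operation status; the array size of 8 is an assumed value.
+ *
+ *     enum rte_dma_status_code st[8];
+ *     uint16_t last, i;
+ *     uint16_t nb = rte_dmadev_completed_status(dev_id, 0, 8, &last, st);
+ *     for (i = 0; i < nb; i++)
+ *         if (st[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *             ... handle the failed operation ...
+ */
+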
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000..2e37882
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,24 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4..a542c23 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v19 2/7] dmadev: introduce DMA device library internal header
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-09-02 13:13   ` Chengwen Feng
  2021-09-03 15:13     ` Kevin Laatz
  2021-09-03 15:35     ` Conor Walsh
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (4 subsequent siblings)
  6 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 13:13 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch introduces the DMA device library internal header, which contains
the internal data types that are used by the DMA devices in order to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 176 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 177 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85..f421ec1 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000..c44d88e
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dmadev_vchan_conf *conf);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return the number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t dev_info_get;
+	rte_dmadev_configure_t dev_configure;
+	rte_dmadev_start_t dev_start;
+	rte_dmadev_stop_t dev_stop;
+	rte_dmadev_close_t dev_close;
+	rte_dmadev_vchan_setup_t vchan_setup;
+	rte_dmadev_stats_get_t stats_get;
+	rte_dmadev_stats_reset_t stats_reset;
+	rte_dmadev_dump_t dev_dump;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field of 'struct rte_dmadev'
+	 * in the primary process; it is used by secondary processes to get
+	 * the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	rte_dmadev_copy_t copy;
+	rte_dmadev_copy_sg_t copy_sg;
+	rte_dmadev_fill_t fill;
+	rte_dmadev_submit_t submit;
+	rte_dmadev_completed_t completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr; /**< Reserved for future IO function. */
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data', which is pointed to by the
+	 * 'data' field.
+	 *
+	 * - In a secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of
+	 * 'struct rte_dmadev_data', which was initialized by the primary
+	 * process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v19 3/7] dmadev: introduce DMA device library PMD header
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-09-02 13:13   ` Chengwen Feng
  2021-09-03 15:13     ` Kevin Laatz
  2021-09-03 15:35     ` Conor Walsh
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (3 subsequent siblings)
  6 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 13:13 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch introduces the DMA device library PMD header, which provides the
driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 +++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1..833baf7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 40ae3b1..fad8e00 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -728,6 +728,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000..45141f9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 2e37882..d027eea 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -22,3 +22,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+        global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-09-02 13:13   ` Chengwen Feng
  2021-09-03 15:13     ` Kevin Laatz
  2021-09-03 15:35     ` Conor Walsh
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 5/7] doc: add DMA device library guide Chengwen Feng
                     ` (2 subsequent siblings)
  6 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 13:13 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch introduces the DMA device library implementation, which includes
configuration of and I/O with DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 614 +++++++++++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 727 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c..331a431 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7..d2fc85e 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000..877eead
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u don't support silent", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf);
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf, sizeof(*dev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+			dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+			dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= info.nb_vchans) {
+		RTE_DMADEV_LOG(ERR, "Device %u vchan out range!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf);
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dmadev_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMADEV_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMADEV_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMADEV_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMADEV_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMADEV_CAPA_SVA,         "sva"     },
+		{ RTE_DMADEV_CAPA_SILENT,      "silent"  },
+		{ RTE_DMADEV_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMADEV_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMADEV_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dmadev_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		fprintf(f, " %s", dmadev_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	fprintf(f, "\n");
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	dmadev_dump_capability(f, info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index fad8e00..e438406 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -785,9 +785,21 @@ struct rte_dmadev_sge {
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -823,10 +835,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -858,9 +883,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -880,8 +917,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   0 on success. Otherwise negative value is returned.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -907,9 +956,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -939,10 +1016,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index c44d88e..e94aa1c 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -173,4 +173,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index d027eea..80be592 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -26,6 +26,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v19 5/7] doc: add DMA device library guide
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-09-02 13:13   ` Chengwen Feng
  2021-09-03 15:13     ` Kevin Laatz
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test Chengwen Feng
  6 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 13:13 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch adds the dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                          |   1 +
 doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++++++
 doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst      |   1 +
 4 files changed, 410 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg

diff --git a/MAINTAINERS b/MAINTAINERS
index 22dcd12..3858aa7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -499,6 +499,7 @@ F: doc/guides/prog_guide/rawdev.rst
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
 
 
 Memory Pool Drivers
diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000..e47a164
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+====================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices, as well as a generic DMA API which allows
+DMA devices to be managed and configured, and supports provisioning DMA
+operations on a DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (aka. hardware
+   DMA queues), and each hardware DMA channel should be represented by a
+   dmadev.
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+   DMA channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. e.g. an application could create
+   virtual DMA channel 0 for memory-to-memory transfers and virtual DMA
+   channel 1 for memory-to-device transfers.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI BDF
+(bus/bridge, device, function). Specific physical DMA controllers, like
+other physical devices in DPDK, can be listed using the EAL command line
+options.
+
+The dmadevs are dynamically allocated by using the API
+``rte_dmadev_pmd_allocate`` based on the number of hardware DMA channels.
+
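+For illustration, a PMD probe function might allocate one dmadev per hardware
+DMA channel along the following lines. This is a minimal sketch; the
+``nb_hw_channels`` variable and the device naming scheme are hypothetical:
+
+.. code-block:: c
+
+   struct rte_dmadev *dev;
+   char name[RTE_DMADEV_NAME_MAX_LEN];
+   uint16_t i;
+
+   for (i = 0; i < nb_hw_channels; i++) {
+       /* One dmadev per hardware DMA channel. */
+       snprintf(name, sizeof(name), "dma_example_ch%u", i);
+       dev = rte_dmadev_pmd_allocate(name);
+       if (dev == NULL)
+           return -ENOMEM;
+       /* The driver then fills in dev->dev_ops and dev->dev_private. */
+   }
+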
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
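+For example, an application can map a device name to its device index (the
+device name used here is hypothetical):
+
+.. code-block:: c
+
+   int dev_id = rte_dmadev_get_dev_id("dma_example_ch0");
+   if (dev_id < 0)
+       return dev_id; /* no DMA device with this name */
+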
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
+
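+For example, a device could be configured with a single virtual DMA channel
+and silent mode disabled as follows (a minimal usage sketch):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = {
+       .nb_vchans = 1,
+       .enable_silent = false,
+   };
+
+   if (rte_dmadev_configure(dev_id, &dev_conf) != 0)
+       rte_exit(EXIT_FAILURE, "cannot configure dmadev %u\n", dev_id);
+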
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, and the source and
+destination device access port parameters.
+
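+For example, virtual DMA channel 0 could be set up for memory-to-memory
+copies as follows. This is a minimal sketch; it assumes the zero value of the
+port type enum is ``RTE_DMADEV_PORT_NONE``, which is required for
+memory-to-memory transfers, and that 1024 lies within the device's reported
+``min_desc``/``max_desc`` range:
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 1024,
+       /* src_port/dst_port stay zero-initialized (RTE_DMADEV_PORT_NONE). */
+   };
+
+   if (rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf) != 0)
+       rte_exit(EXIT_FAILURE, "cannot set up vchan 0\n");
+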
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
+
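+For example, an application can check the capability flags before choosing
+which operations to enqueue:
+
+.. code-block:: c
+
+   struct rte_dmadev_info info;
+
+   if (rte_dmadev_info_get(dev_id, &info) != 0)
+       rte_exit(EXIT_FAILURE, "cannot get info of dmadev %u\n", dev_id);
+   if ((info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY) == 0)
+       rte_exit(EXIT_FAILURE, "dmadev %u cannot do plain copies\n", dev_id);
+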
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per operation
+metadata in an application-defined circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue a doorbell to hardware.
+Alternatively, the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs, ``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``, which are used to obtain the results of
+the enqueue requests. ``rte_dmadev_completed`` will return the number of
+successfully completed operations. ``rte_dmadev_completed_status`` will return
+the number of completed operations along with the status of each operation
+(filled into the ``status`` array passed by the user). These two APIs can also
+return the last completed operation's ``ring_idx``, which helps users track
+operations within their own application-defined rings.
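+
+Putting the enqueue and dequeue APIs together, a simple copy-and-poll loop
+might look as follows. This is a minimal sketch: ``src_iova``, ``dst_iova``
+and ``len`` are hypothetical, and real applications would typically batch
+completions rather than poll for a single operation:
+
+.. code-block:: c
+
+   uint16_t last_idx, nb_done;
+   bool has_error = false;
+   int ring_idx;
+
+   /* Enqueue the copy and ring the doorbell in one call. */
+   ring_idx = rte_dmadev_copy(dev_id, 0, src_iova, dst_iova, len,
+                              RTE_DMA_OP_FLAG_SUBMIT);
+   if (ring_idx < 0)
+       rte_exit(EXIT_FAILURE, "cannot enqueue copy\n");
+
+   /* Poll until the operation completes or an error is reported. */
+   do {
+       nb_done = rte_dmadev_completed(dev_id, 0, 1, &last_idx, &has_error);
+   } while (nb_done == 0 && !has_error);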
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000..157d7eb
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507..0abea06 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 5/7] doc: add DMA device library guide Chengwen Feng
@ 2021-09-02 13:13   ` Chengwen Feng
  2021-09-03 15:14     ` Kevin Laatz
  2021-09-03 15:36     ` Conor Walsh
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test Chengwen Feng
  6 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 13:13 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

The skeleton dmadev driver, along the lines of the rawdev skeleton, is for
showcasing the dmadev library.

The design of the skeleton involves a virtual device which is plugged into
the VDEV bus on initialization.

Also, enable compilation of the dmadev skeleton driver.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
---
 MAINTAINERS                            |   1 +
 drivers/dma/meson.build                |  11 +
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 601 +++++++++++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  59 ++++
 drivers/dma/skeleton/version.map       |   3 +
 drivers/meson.build                    |   1 +
 7 files changed, 683 insertions(+)
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 3858aa7..e69fb28 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -499,6 +499,7 @@ F: doc/guides/prog_guide/rawdev.rst
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: drivers/dma/skeleton/
 F: doc/guides/prog_guide/dmadev.rst
 
 
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000..0c2c34c
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+if is_windows
+    subdir_done()
+endif
+
+drivers = [
+        'skeleton',
+]
+std_deps = ['dmadev']
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000..27509b1
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000..7033062
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_ring.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances */
+static uint16_t skeldma_init_once;
+
+static int
+skeldma_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	128
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM |
+			     RTE_DMADEV_CAPA_SVA |
+			     RTE_DMADEV_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dmadev *dev, const struct rte_dmadev_conf *conf)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dmadev *dev = (struct rte_dmadev *)param;
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			/* Guard against wrap-around of the idle counter. */
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, including:
+	 * 1) flush the pending/running/completed rings back to the empty ring.
+	 * 2) init the ring idx to zero.
+	 * 3) init the running statistics.
+	 * 4) set the cpucopy task's exit_flag to false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Set thread affinity lcore = %u fail!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skeleton_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a full ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dmadev *dev)
+{
+	/* The device already stopped */
+	vchan_release(dev->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dmadev *dev, uint16_t vchan,
+		    const struct rte_dmadev_vchan_conf *conf)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of desc must be power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dmadev *dev, uint16_t vchan,
+		  struct rte_dmadev_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dmadev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->dev_private;
+
+	fprintf(f,
+		"  lcore_id: %d\n"
+		"  socket_id: %d\n"
+		"  desc_empty_ring_count: %u\n"
+		"  desc_pending_ring_count: %u\n"
+		"  desc_running_ring_count: %u\n"
+		"  desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	fprintf(f,
+		"  next_ring_idx: %u\n"
+		"  submitted_count: %" PRIu64 "\n"
+		"  completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(struct rte_dmadev *dev, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(struct rte_dmadev *dev,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(struct rte_dmadev *dev,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dmadev_ops skeldma_ops = {
+	.dev_info_get = skeldma_info_get,
+	.dev_configure = skeldma_configure,
+	.dev_start = skeldma_start,
+	.dev_stop = skeldma_stop,
+	.dev_close = skeldma_close,
+
+	.vchan_setup = skeldma_vchan_setup,
+
+	.stats_get = skeldma_stats_get,
+	.stats_reset = skeldma_stats_reset,
+
+	.dev_dump = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dmadev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	dev = rte_dmadev_pmd_allocate(name);
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev->dev_private = rte_zmalloc_socket("dmadev private",
+					      sizeof(struct skeldma_hw),
+					      RTE_CACHE_LINE_SIZE,
+					      socket_id);
+	if (!dev->dev_private) {
+		SKELDMA_LOG(ERR, "Unable to allocate device private memory");
+		(void)rte_dmadev_pmd_release(dev);
+		return -ENOMEM;
+	}
+
+	dev->copy = skeldma_copy;
+	dev->submit = skeldma_submit;
+	dev->completed = skeldma_completed;
+	dev->completed_status = skeldma_completed_status;
+	dev->dev_ops = &skeldma_ops;
+	dev->device = &vdev->device;
+
+	hw = dev->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	dev = rte_dmadev_get_device_by_name(name);
+	if (!dev)
+		return -EINVAL;
+
+	ret = rte_dmadev_close(dev->data->dev_id);
+	if (ret)
+		return ret;
+
+	rte_free(dev->dev_private);
+	dev->dev_private = NULL;
+	(void)rte_dmadev_pmd_release(dev);
+
+	return 0;
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multi-process not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_init_once) {
+		SKELDMA_LOG(ERR, "Multiple instances not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		/* Device instance created; Second instance not possible */
+		skeldma_init_once = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_init_once = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000..46ff000
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef __SKELETON_DMADEV_H__
+#define __SKELETON_DMADEV_H__
+
+#include <rte_dmadev.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* __SKELETON_DMADEV_H__ */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000..c2e0723
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
diff --git a/drivers/meson.build b/drivers/meson.build
index d9e331e..a390787 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-09-02 13:13   ` Chengwen Feng
  2021-09-02 14:11     ` Walsh, Conor
  2021-09-03 15:14     ` Kevin Laatz
  6 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-02 13:13 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the
test framework.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 MAINTAINERS                |   1 +
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  45 ++++
 app/test/test_dmadev_api.c | 532 +++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 582 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c

diff --git a/MAINTAINERS b/MAINTAINERS
index e69fb28..17763c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -500,6 +500,7 @@ DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
 F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
 F: doc/guides/prog_guide/dmadev.rst
 
 
diff --git a/app/test/meson.build b/app/test/meson.build
index a761168..9027eba 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -43,6 +43,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -162,6 +164,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -333,6 +336,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000..bb01e86
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+
+/* from test_dmadev_api.c */
+extern int test_dmadev_api(uint16_t dev_id);
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dmadev_get_dev_id(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dmadev_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dmadev(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dmadev);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000..a7795eb
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,532 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dmadev_api(uint16_t dev_id);
+
+#define SKELDMA_TEST_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static uint16_t test_dev_id;
+static uint16_t invalid_dev_id;
+
+static int total;
+static int passed;
+static int failed;
+static char *src;
+static char *dst;
+
+static int
+testsuite_setup(uint16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL)
+		return -ENOMEM;
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	rte_free(dst);
+	/* Ensure the dmadev is stopped. */
+	rte_dmadev_stop(test_dev_id);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dmadev_get_dev_id(void)
+{
+	int ret = rte_dmadev_get_dev_id("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dmadev_is_valid_dev(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dmadev_is_valid_dev(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_count(void)
+{
+	uint16_t count = rte_dmadev_count();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_info_get(void)
+{
+	struct rte_dmadev_info info =  { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_configure(void)
+{
+	struct rte_dmadev_conf conf = { 0 };
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configured nb_vchans does not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_vchan_setup(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configure succeeds */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_vchan_setup(test_dev_id, dev_conf.nb_vchans,
+				     &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.src_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_start_stop(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check that reconfigure and vchan setup are rejected while started */
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Expected -EBUSY, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Expected -EBUSY, %d", ret);
+
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_stats(void)
+{
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dmadev_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, RTE_DMADEV_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret, i;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check enqueue without submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+	/* Check add submit */
+	ret = rte_dmadev_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to get completed status, %d", status[i]);
+
+	/* Check do completed status again */
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed status");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to get completed status, %d", status[i]);
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dmadev_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup failed!\n");
+		return -1;
+	}
+
+	/* If the testcase exits successfully, it must ensure that the test
+	 * dmadev still exists and is left in the stopped state.
+	 */
+	SKELDMA_TEST_RUN(test_dmadev_get_dev_id);
+	SKELDMA_TEST_RUN(test_dmadev_is_valid_dev);
+	SKELDMA_TEST_RUN(test_dmadev_count);
+	SKELDMA_TEST_RUN(test_dmadev_info_get);
+	SKELDMA_TEST_RUN(test_dmadev_configure);
+	SKELDMA_TEST_RUN(test_dmadev_vchan_setup);
+	SKELDMA_TEST_RUN(test_dmadev_start_stop);
+	SKELDMA_TEST_RUN(test_dmadev_stats);
+	SKELDMA_TEST_RUN(test_dmadev_dump);
+	SKELDMA_TEST_RUN(test_dmadev_completed);
+	SKELDMA_TEST_RUN(test_dmadev_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
-- 
2.8.1


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-02 11:51     ` Bruce Richardson
@ 2021-09-02 13:39       ` fengchengwen
  2021-09-03 12:59         ` Maxime Coquelin
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-09-02 13:39 UTC (permalink / raw)
  To: Bruce Richardson, Li, Xiaoyun
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, Maxime Coquelin, chenbo.xia

Fixed in v19.

I think there are many patches waiting for the dmadev framework to be
upstreamed, so could you help review the unreviewed patches (like
dma/skeleton and app/test)?

Also, we have developed the dma driver for hisilicon, and the
corresponding test apps (like examples/vhost and testpmd) are being
developed.

examples/vhost: will be extended to support the dmadev type.
testpmd: will introduce some dma control commands and a dma forwarding mode.
         the dma forwarding mode process (a rough sketch follows):
              // 1st: call rte_eth_rx_burst
              // 2nd: post the received packet to dma, moving its data to another packet
              // 3rd: set the new copy's rte_mbuf header
              // 4th: free the received packet
              // 5th: get the dma completed request and associate it with its rte_mbuf
              // 6th: send the 5th step's rte_mbuf to the nic
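In rough C, one iteration of that forwarding loop could look like the
sketch below (illustration only: a single vchan is assumed, mbuf alloc
failure and completion polling are elided, and it needs rte_ethdev.h,
rte_dmadev.h and rte_mbuf.h):

static uint16_t
dma_fwd_one_burst(uint16_t port_id, uint16_t dma_id, struct rte_mempool *mp)
{
	struct rte_mbuf *pkts[32], *copies[32];
	uint16_t nb_rx, nb_cpl, last_idx, i;
	bool has_error = false;

	/* 1st: receive a burst of packets */
	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
	for (i = 0; i < nb_rx; i++) {
		copies[i] = rte_pktmbuf_alloc(mp);
		/* 2nd: post the copy to dma */
		rte_dmadev_copy(dma_id, 0, rte_pktmbuf_iova(pkts[i]),
				rte_pktmbuf_iova(copies[i]),
				pkts[i]->data_len, 0);
		/* 3rd: set the new copy's mbuf header */
		copies[i]->data_len = pkts[i]->data_len;
		copies[i]->pkt_len = pkts[i]->pkt_len;
	}
	rte_dmadev_submit(dma_id, 0);

	/* 5th: get the completed requests */
	nb_cpl = rte_dmadev_completed(dma_id, 0, nb_rx, &last_idx, &has_error);

	/* 4th: free the received packets; deferred to this point because
	 * the source buffers must stay valid until the copies complete
	 */
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);

	/* 6th: send the copied mbufs to the nic */
	return rte_eth_tx_burst(port_id, 0, copies, nb_cpl);
}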

@Maxime @Chenbo @Xiaoyun I'd like to hear your opinions.


On 2021/9/2 19:51, Bruce Richardson wrote:
> On Thu, Sep 02, 2021 at 06:54:17PM +0800, Chengwen Feng wrote:
>> This patch adds myself as dmadev's maintainer and updates the release notes.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com> ---
> 
> Just in case you are doing any further revisions of this patchset, the
> maintainers entry and RN entry are generally added in the first patch, so
> squash this patch into patch #1.
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test Chengwen Feng
@ 2021-09-02 14:11     ` Walsh, Conor
  2021-09-03  0:39       ` fengchengwen
  2021-09-03 15:14     ` Kevin Laatz
  1 sibling, 1 reply; 339+ messages in thread
From: Walsh, Conor @ 2021-09-02 14:11 UTC (permalink / raw)
  To: Chengwen Feng, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin, Laatz, Kevin

Hi Chengwen,

The output from the API tests is not very straightforward to interpret if you are not familiar with these tests.
Could we change the log level of the dmadev library before and after the API tests using something similar to
the code I have included inline below?

> +static int
> +testsuite_setup(uint16_t dev_id)
> +{
> +	test_dev_id = dev_id;
> +	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
> +
> +	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
> +	if (src == NULL)
> +		return -ENOMEM;
> +	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
> +	if (dst == NULL)
> +		return -ENOMEM;
	
	/* Set dmadev log level to critical to suppress unnecessary output during API tests. */
	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
> +
> +	total = 0;
> +	passed = 0;
> +	failed = 0;
> +
> +	return 0;
> +}
> +
> +static void
> +testsuite_teardown(void)
> +{
> +	rte_free(src);
> +	rte_free(dst);
> +	/* Ensure the dmadev is stopped. */
> +	rte_dmadev_stop(test_dev_id);
	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
> +}

This change would bring your output down from:

### Test dmadev infrastructure using skeleton driver
test_dmadev_get_dev_id Passed
test_dmadev_is_valid_dev Passed
test_dmadev_count Passed
Invalid dev_id=64
test_dmadev_info_get Passed
Invalid dev_id=64
Device 1 configure zero vchans
Device 1 configure too many vchans
Device 1 don't support silent
test_dmadev_configure Passed
Invalid dev_id=64
Device 1 number of descriptors invalid
Device 1 vchan out range!
Device 1 direction invalid!
Device 1 direction invalid!
Device 1 don't support mem2dev transfer
Device 1 don't support dev2mem transfer
Device 1 don't support dev2dev transfer
Device 1 number of descriptors invalid
Device 1 number of descriptors invalid
Device 1 source port type invalid
Device 1 destination port type invalid
test_dmadev_vchan_setup Passed
Invalid dev_id=64
Invalid dev_id=64
Device 1 must be stopped to allow configuration
Device 1 must be stopped to allow configuration
test_dmadev_start_stop Passed
Invalid dev_id=64
Invalid dev_id=64
Invalid dev_id=64
Device 1 vchan 1 out of range
Device 1 vchan 1 out of range
test_dmadev_stats Passed
test_dmadev_completed Passed
test_dmadev_completed_status Passed
Device 1 already stopped
Total tests   : 10
Passed        : 10
Failed        : 0
skeldma_remove(): Remove dma_skeleton dmadev

To:

### Test dmadev infrastructure using skeleton driver
test_dmadev_get_dev_id Passed
test_dmadev_is_valid_dev Passed
test_dmadev_count Passed
test_dmadev_info_get Passed
test_dmadev_configure Passed
test_dmadev_vchan_setup Passed
test_dmadev_start_stop Passed
test_dmadev_stats Passed
test_dmadev_completed Passed
test_dmadev_completed_status Passed
Total tests   : 10
Passed        : 10
Failed        : 0
skeldma_remove(): Remove dma_skeleton dmadev

Thanks,
Conor.

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test
  2021-09-02 14:11     ` Walsh, Conor
@ 2021-09-03  0:39       ` fengchengwen
  2021-09-03 15:38         ` Walsh, Conor
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-09-03  0:39 UTC (permalink / raw)
  To: Walsh, Conor, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin, Laatz, Kevin

This is a tradeoff. If we change the log level of dmadev, it becomes difficult to know where a test case fails.

So I prefer to add more meaningful information, at least printing out the function name.

And v19 adds the function name to the log format by default, so rte_dmadev's log will look like:

rte_dmadev_configure(): Invalid dev_id=64
rte_dmadev_configure(): Device 4 configure zero vchans
rte_dmadev_configure(): Device 4 configure too many vchans
rte_dmadev_configure(): Device 4 don't support silent
test_dmadev_configure Passed
rte_dmadev_vchan_setup(): Invalid dev_id=64
rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
rte_dmadev_vchan_setup(): Device 4 vchan out range!
rte_dmadev_vchan_setup(): Device 4 direction invalid!
rte_dmadev_vchan_setup(): Device 4 direction invalid!
rte_dmadev_vchan_setup(): Device 4 don't support mem2dev transfer
rte_dmadev_vchan_setup(): Device 4 don't support dev2mem transfer
rte_dmadev_vchan_setup(): Device 4 don't support dev2dev transfer
rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
rte_dmadev_vchan_setup(): Device 4 source port type invalid
rte_dmadev_vchan_setup(): Device 4 destination port type invalid


On 2021/9/2 22:11, Walsh, Conor wrote:
> Hi Chengwen,
> 
> The output from the API tests is not very straightforward to interpret if you are not familiar with these tests.
> Could we change the log level of the dmadev library before and after the API tests using something similar to 
> the code I have included inline below?
> 
>> +static int
>> +testsuite_setup(uint16_t dev_id)
>> +{
>> +	test_dev_id = dev_id;
>> +	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
>> +
>> +	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
>> +	if (src == NULL)
>> +		return -ENOMEM;
>> +	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
>> +	if (dst == NULL)
>> +		return -ENOMEM;
> 	
> 	/* Set dmadev log level to critical to suppress unnecessary output during API tests. */
> 	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
>> +
>> +	total = 0;
>> +	passed = 0;
>> +	failed = 0;
>> +
>> +	return 0;
>> +}
>> +
>> +static void
>> +testsuite_teardown(void)
>> +{
>> +	rte_free(src);
>> +	rte_free(dst);
>> +	/* Ensure the dmadev is stopped. */
>> +	rte_dmadev_stop(test_dev_id);
> 	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
>> +}
> 
> This change would bring your output down from:
> 
> ### Test dmadev infrastructure using skeleton driver
> test_dmadev_get_dev_id Passed
> test_dmadev_is_valid_dev Passed
> test_dmadev_count Passed
> Invalid dev_id=64
> test_dmadev_info_get Passed
> Invalid dev_id=64
> Device 1 configure zero vchans
> Device 1 configure too many vchans
> Device 1 don't support silent
> test_dmadev_configure Passed
> Invalid dev_id=64
> Device 1 number of descriptors invalid
> Device 1 vchan out range!
> Device 1 direction invalid!
> Device 1 direction invalid!
> Device 1 don't support mem2dev transfer
> Device 1 don't support dev2mem transfer
> Device 1 don't support dev2dev transfer
> Device 1 number of descriptors invalid
> Device 1 number of descriptors invalid
> Device 1 source port type invalid
> Device 1 destination port type invalid
> test_dmadev_vchan_setup Passed
> Invalid dev_id=64
> Invalid dev_id=64
> Device 1 must be stopped to allow configuration
> Device 1 must be stopped to allow configuration
> test_dmadev_start_stop Passed
> Invalid dev_id=64
> Invalid dev_id=64
> Invalid dev_id=64
> Device 1 vchan 1 out of range
> Device 1 vchan 1 out of range
> test_dmadev_stats Passed
> test_dmadev_completed Passed
> test_dmadev_completed_status Passed
> Device 1 already stopped
> Total tests   : 10
> Passed        : 10
> Failed        : 0
> skeldma_remove(): Remove dma_skeleton dmadev
> 
> To:
> 
> ### Test dmadev infrastructure using skeleton driver
> test_dmadev_get_dev_id Passed
> test_dmadev_is_valid_dev Passed
> test_dmadev_count Passed
> test_dmadev_info_get Passed
> test_dmadev_configure Passed
> test_dmadev_vchan_setup Passed
> test_dmadev_start_stop Passed
> test_dmadev_stats Passed
> test_dmadev_completed Passed
> test_dmadev_completed_status Passed
> Total tests   : 10
> Passed        : 10
> Failed        : 0
> skeldma_remove(): Remove dma_skeleton dmadev
> 
> Thanks,
> Conor.
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-09-03 11:42     ` Gagandeep Singh
  2021-09-04  1:31       ` fengchengwen
  2021-09-03 13:03     ` Bruce Richardson
                       ` (2 subsequent siblings)
  3 siblings, 1 reply; 339+ messages in thread
From: Gagandeep Singh @ 2021-09-03 11:42 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, Nipun Gupta, Hemant Agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

Hi,

<snip>
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Close a DMA device.
> + *
> + * The device cannot be restarted after this call.
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + *
> + * @return
> + *   0 on success. Otherwise negative value is returned.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_close(uint16_t dev_id);
> +
> +/**
> + * rte_dma_direction - DMA transfer direction defines.
> + */
> +enum rte_dma_direction {
> +	RTE_DMA_DIR_MEM_TO_MEM,
> +	/**< DMA transfer direction - from memory to memory.
> +	 *
> +	 * @see struct rte_dmadev_vchan_conf::direction
> +	 */
> +	RTE_DMA_DIR_MEM_TO_DEV,
> +	/**< DMA transfer direction - from memory to device.
> +	 * In a typical scenario, the SoCs are installed on host servers as
> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> +	 * EP(endpoint) mode, it could initiate a DMA move request from
> memory
> +	 * (which is SoCs memory) to device (which is host memory).
> +	 *
> +	 * @see struct rte_dmadev_vchan_conf::direction
> +	 */
> +	RTE_DMA_DIR_DEV_TO_MEM,
> +	/**< DMA transfer direction - from device to memory.
> +	 * In a typical scenario, the SoCs are installed on host servers as
> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
> +	 * (which is host memory) to memory (which is SoCs memory).
> +	 *
> +	 * @see struct rte_dmadev_vchan_conf::direction
> +	 */
> +	RTE_DMA_DIR_DEV_TO_DEV,
> +	/**< DMA transfer direction - from device to device.
> +	 * In a typical scenario, the SoCs are installed on host servers as
> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
> +	 * (which is host memory) to the device (which is another host memory).
> +	 *
> +	 * @see struct rte_dmadev_vchan_conf::direction
> +	 */
> +};
> +
> +/**
>..
The enum rte_dma_direction must have a member RTE_DMA_DIR_ANY for a channel that supports all 4 directions.
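For example, something along these lines could be added (definition and
wording are only a suggestion, not part of the patch):

	RTE_DMA_DIR_ANY,
	/**< DMA transfer direction - any of the above; the direction
	 * is selected per operation instead of being fixed when the
	 * virtual channel is set up.
	 *
	 * @see struct rte_dmadev_vchan_conf::direction
	 */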
<snip>


Regards,
Gagan

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-02 13:39       ` fengchengwen
@ 2021-09-03 12:59         ` Maxime Coquelin
  2021-09-04  7:02           ` fengchengwen
  2021-09-06  2:03           ` Xia, Chenbo
  0 siblings, 2 replies; 339+ messages in thread
From: Maxime Coquelin @ 2021-09-03 12:59 UTC (permalink / raw)
  To: fengchengwen, Bruce Richardson, Li, Xiaoyun
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev, conor.walsh,
	chenbo.xia

Hi,

On 9/2/21 3:39 PM, fengchengwen wrote:
> Fixed in v19.
> 
> I think there are many patches waiting for the dmadev framework to be
> upstreamed, so could you help review the unreviewed patches (like
> dma/skeleton and app/test)?

Thanks for all the work; it looks really promising!

> Also, we have developed the dma driver for hisilicon, and the
> corresponding test apps (like examples/vhost and testpmd) are being
> developed.
> 
> examples/vhost: will be extended to support the dmadev type.

I think Sunil has posted an RFC for the Vhost lib and example; you might
want to have a look to avoid duplicate work.

> testpmd: will introduce some dma control commands and a dma forwarding mode.
>          the dma forwarding mode process:
>               // 1st: call rte_eth_rx_burst
>               // 2nd: post the received packet to dma, moving its data to another packet
>               // 3rd: set the new copy's rte_mbuf header
>               // 4th: free the received packet
>               // 5th: get the dma completed request and associate it with its rte_mbuf
>               // 6th: send the 5th step's rte_mbuf to the nic
> 
> @Maxime @Chenbo @Xiaoyun I'd like to hear your opinions.

We might also think of adding async support to the Vhost PMD; that would
be another way to test dmadev with testpmd.

Thanks,
Maxime

> 
> On 2021/9/2 19:51, Bruce Richardson wrote:
>> On Thu, Sep 02, 2021 at 06:54:17PM +0800, Chengwen Feng wrote:
>>> This patch adds myself as dmadev's maintainer and updates the release notes.
>>>
>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com> ---
>>
>> Just in case you are doing any further revisions of this patchset, the
>> maintainers entry and RN entry are generally added in the first patch, so
>> squash this patch into patch #1.
>> .
>>
> 


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-09-03 11:42     ` Gagandeep Singh
@ 2021-09-03 13:03     ` Bruce Richardson
  2021-09-04  3:05       ` fengchengwen
  2021-09-04 10:10       ` Morten Brørup
  2021-09-03 15:13     ` Kevin Laatz
  2021-09-03 15:35     ` Conor Walsh
  3 siblings, 2 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-09-03 13:03 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On Thu, Sep 02, 2021 at 09:13:09PM +0800, Chengwen Feng wrote:
> The 'dmadevice' is a generic type of DMA device.
> 
> This patch introduces the 'dmadevice' public APIs which expose generic
> operations that can enable configuration and I/O with the DMA devices.
> 
> Maintainers update is also included in this patch.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
> ---
>  MAINTAINERS                            |   4 +
>  doc/api/doxy-api-index.md              |   1 +
>  doc/api/doxy-api.conf.in               |   1 +
>  doc/guides/rel_notes/release_21_11.rst |   5 +
>  lib/dmadev/meson.build                 |   4 +
>  lib/dmadev/rte_dmadev.h                | 949 +++++++++++++++++++++++++++++++++
>  lib/dmadev/version.map                 |  24 +
>  lib/meson.build                        |   1 +
>  8 files changed, 989 insertions(+)
>  create mode 100644 lib/dmadev/meson.build
>  create mode 100644 lib/dmadev/rte_dmadev.h
>  create mode 100644 lib/dmadev/version.map
> 

<snip>

> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Trigger hardware to begin performing enqueued operations.
> + *
> + * This API is used to write the "doorbell" to the hardware to trigger it
> + * to begin the operations previously enqueued by rte_dmadev_copy/fill().
> + *
> + * @param dev_id
> + *   The identifier of the device.
> + * @param vchan
> + *   The identifier of virtual DMA channel.
> + *
> + * @return
> + *   0 on success. Otherwise negative value is returned.
> + */
> +__rte_experimental
> +int
> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
> +

Putting this out here for discussion:

The developers looking at integrating dma acceleration into vhost-virtio,
e.g. for OVS use, have come back with a request that we provide a method
for querying the amount of space in the descriptor ring, or the size of
the next burst, or similar. Basically, the reason for the ask is to allow
an app to determine whether a set of jobs of size N can be enqueued before
the first one is, so that we don't get a half-offloaded copy of a
multi-segment packet (for devices where scatter-gather is not available).

In our "ioat" rawdev driver, we did this by providing a "burst_capacity"
API which returned the number of elements which could be enqueued in the
next burst without error (normally the available ring space). Looking at
the dmadev APIs, an alternative way to do this is to extend the "submit()"
function to allow a 3rd optional parameter to return this info. That is,
when submitting one burst of operations, you get info about how many more
you can enqueue in the next burst. [For submitting packets via the submit
flag, this info would not be available, as I feel extending all enqueue
operations would be excessive].

Therefore, I see a number of options for us to meet the ask for space
querying API:
1. provide a capacity API as done with ioat driver
2. provide (optional) capacity information from each submit() call
3. provide both #1 and #2 above as they are compatible
4. <some other idea>

For me, I think #3 is probably the most flexible approach. The benefit of
#2 is that the info can be provided to the application much more cheaply
than when the app has to call a separate API (which wouldn't be on the
fast-path). However, a way to provide the info apart from submitting a
burst would also be helpful, hence adding the extra function too (#1).
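To make the options concrete, rough prototypes could look as follows
(names and signatures are illustrative only, not a final proposal):

/* Option #1: standalone capacity query, modelled on the ioat rawdev
 * driver's "burst_capacity" API; returns how many operations can be
 * enqueued before the descriptor ring is full.
 */
__rte_experimental
uint16_t
rte_dmadev_burst_capacity(uint16_t dev_id, uint16_t vchan);

/* Option #2: extend submit() with an optional third parameter; when
 * "space" is non-NULL, it is filled with the number of operations
 * which can be enqueued in the next burst.
 */
__rte_experimental
int
rte_dmadev_submit(uint16_t dev_id, uint16_t vchan, uint16_t *space);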

What are other people's thoughts or ideas on this?

Regards,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-09-03 11:42     ` Gagandeep Singh
  2021-09-03 13:03     ` Bruce Richardson
@ 2021-09-03 15:13     ` Kevin Laatz
  2021-09-03 15:35     ` Conor Walsh
  3 siblings, 0 replies; 339+ messages in thread
From: Kevin Laatz @ 2021-09-03 15:13 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 02/09/2021 14:13, Chengwen Feng wrote:
> The 'dmadevice' is a generic type of DMA device.
>
> This patch introduces the 'dmadevice' public APIs which expose generic
> operations that can enable configuration and I/O with the DMA devices.
>
> Maintainers update is also included in this patch.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
> ---
>   MAINTAINERS                            |   4 +
>   doc/api/doxy-api-index.md              |   1 +
>   doc/api/doxy-api.conf.in               |   1 +
>   doc/guides/rel_notes/release_21_11.rst |   5 +
>   lib/dmadev/meson.build                 |   4 +
>   lib/dmadev/rte_dmadev.h                | 949 +++++++++++++++++++++++++++++++++
>   lib/dmadev/version.map                 |  24 +
>   lib/meson.build                        |   1 +
>   8 files changed, 989 insertions(+)
>   create mode 100644 lib/dmadev/meson.build
>   create mode 100644 lib/dmadev/rte_dmadev.h
>   create mode 100644 lib/dmadev/version.map
>
<snip>
> +
> +/**
> + * rte_dma_direction - DMA transfer direction defines.
> + */
No need to have the struct name in the comment.
> +enum rte_dma_direction {
> +	RTE_DMA_DIR_MEM_TO_MEM,
> +	/**< DMA transfer direction - from memory to memory.
> +	 *
> +	 * @see struct rte_dmadev_vchan_conf::direction
> +	 */
> +	RTE_DMA_DIR_MEM_TO_DEV,
> +	/**< DMA transfer direction - from memory to device.
> +	 * In a typical scenario, the SoCs are installed on host servers as
> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> +	 * EP(endpoint) mode, it could initiate a DMA move request from memory
> +	 * (which is SoCs memory) to device (which is host memory).
> +	 *
> +	 * @see struct rte_dmadev_vchan_conf::direction
> +	 */
> +	RTE_DMA_DIR_DEV_TO_MEM,
> +	/**< DMA transfer direction - from device to memory.
> +	 * In a typical scenario, the SoCs are installed on host servers as
> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
> +	 * (which is host memory) to memory (which is SoCs memory).
> +	 *
> +	 * @see struct rte_dmadev_vchan_conf::direction
> +	 */
> +	RTE_DMA_DIR_DEV_TO_DEV,
> +	/**< DMA transfer direction - from device to device.
> +	 * In a typical scenario, the SoCs are installed on host servers as
> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
> +	 * (which is host memory) to the device (which is another host memory).
> +	 *
> +	 * @see struct rte_dmadev_vchan_conf::direction
> +	 */
> +};
> +
> +/**
> + * enum rte_dmadev_port_type - DMA access port type defines.
> + *
> + * @see struct rte_dmadev_port_param::port_type
> + */
> +enum rte_dmadev_port_type {
> +	RTE_DMADEV_PORT_NONE,
> +	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
> +};
> +
<snip>
> +
> +/**
> + * rte_dmadev_stats - running statistics.
> + */

No need to have the struct name in the comment. Maybe "Operation 
statistic counters"?


> +struct rte_dmadev_stats {
> +	uint64_t submitted;
> +	/**< Count of operations which were submitted to hardware. */
> +	uint64_t completed;
> +	/**< Count of operations which were completed. */
> +	uint64_t errors;
> +	/**< Count of operations which failed to complete. */
> +};

The comments here are a little ambiguous; it would be better to
explicitly mention that "errors" is a subset of "completed" and not an
independent statistic.
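For example, the comments could read something like this (wording is
only a suggestion):

	uint64_t completed;
	/**< Count of operations which were completed, including both
	 * successfully completed and failed operations.
	 */
	uint64_t errors;
	/**< Count of operations which failed to complete; this is a
	 * subset of "completed", not an independent counter.
	 */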


<snip>
> +
> +/**
> + * rte_dmadev_sge - can hold scatter-gather DMA operation request entry.
> + */
No need to have the struct name in the comment.
> +struct rte_dmadev_sge {
> +	rte_iova_t addr; /**< The DMA operation address. */
> +	uint32_t length; /**< The DMA operation length. */
> +};
> +

Apart from the minor comments, LGTM.

Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 2/7] dmadev: introduce DMA device library internal header
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-09-03 15:13     ` Kevin Laatz
  2021-09-03 15:35     ` Conor Walsh
  1 sibling, 0 replies; 339+ messages in thread
From: Kevin Laatz @ 2021-09-03 15:13 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 02/09/2021 14:13, Chengwen Feng wrote:
> This patch introduces the DMA device library internal header, which contains
> internal data types that are used by the DMA devices in order to expose
> their ops to the class.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
>   lib/dmadev/meson.build       |   1 +
>   lib/dmadev/rte_dmadev_core.h | 176 +++++++++++++++++++++++++++++++++++++++++++
>   2 files changed, 177 insertions(+)
>   create mode 100644 lib/dmadev/rte_dmadev_core.h
>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>



^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 3/7] dmadev: introduce DMA device library PMD header
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-09-03 15:13     ` Kevin Laatz
  2021-09-03 15:35     ` Conor Walsh
  1 sibling, 0 replies; 339+ messages in thread
From: Kevin Laatz @ 2021-09-03 15:13 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 02/09/2021 14:13, Chengwen Feng wrote:
> This patch introduces the DMA device library PMD header, which provides the
> driver-facing APIs for a DMA device.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
>   lib/dmadev/meson.build      |  1 +
>   lib/dmadev/rte_dmadev.h     |  2 ++
>   lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++++++++++
>   lib/dmadev/version.map      | 10 +++++++
>   4 files changed, 85 insertions(+)
>   create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-09-03 15:13     ` Kevin Laatz
  2021-09-03 15:30       ` Bruce Richardson
  2021-09-03 15:35     ` Conor Walsh
  1 sibling, 1 reply; 339+ messages in thread
From: Kevin Laatz @ 2021-09-03 15:13 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 02/09/2021 14:13, Chengwen Feng wrote:
> This patch introduces the DMA device library implementation, which includes
> configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
>   config/rte_config.h          |   3 +
>   lib/dmadev/meson.build       |   1 +
>   lib/dmadev/rte_dmadev.c      | 614 +++++++++++++++++++++++++++++++++++++++++++
>   lib/dmadev/rte_dmadev.h      | 118 ++++++++-
>   lib/dmadev/rte_dmadev_core.h |   2 +
>   lib/dmadev/version.map       |   1 +
>   6 files changed, 727 insertions(+), 12 deletions(-)
>   create mode 100644 lib/dmadev/rte_dmadev.c
>
> diff --git a/config/rte_config.h b/config/rte_config.h
> index 590903c..331a431 100644
> --- a/config/rte_config.h
> +++ b/config/rte_config.h
> @@ -81,6 +81,9 @@
>   /* rawdev defines */
>   #define RTE_RAWDEV_MAX_DEVS 64
>   
> +/* dmadev defines */
> +#define RTE_DMADEV_MAX_DEVS 64
> +
>   /* ip_fragmentation defines */
>   #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
>   #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
> diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
> index 833baf7..d2fc85e 100644
> --- a/lib/dmadev/meson.build
> +++ b/lib/dmadev/meson.build
> @@ -1,6 +1,7 @@
>   # SPDX-License-Identifier: BSD-3-Clause
>   # Copyright(c) 2021 HiSilicon Limited.
>   
> +sources = files('rte_dmadev.c')
>   headers = files('rte_dmadev.h')
>   indirect_headers += files('rte_dmadev_core.h')
>   driver_sdk_headers += files('rte_dmadev_pmd.h')
> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> new file mode 100644
> index 0000000..877eead
> --- /dev/null
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -0,0 +1,614 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + */
> +
> +#include <ctype.h>
> +#include <inttypes.h>
> +#include <stdint.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_eal.h>
> +#include <rte_errno.h>
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +#include <rte_memory.h>
> +#include <rte_memzone.h>
> +#include <rte_malloc.h>
> +#include <rte_string_fns.h>
> +
> +#include "rte_dmadev.h"
> +#include "rte_dmadev_pmd.h"
> +

Many of these includes can be removed from this file, as they are 
already included elsewhere (e.g. rte_common.h via rte_dmadev.h).

For example, you could remove: ctype.h, stdint.h, stdlib.h, rte_errno.h, 
rte_lcore.h, rte_memory.h, rte_malloc.h, rte_dev...

Please run test-meson-builds.sh after removing to make sure there are no 
missing dependencies.

<snip>


With the above comment addressed,

Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 5/7] doc: add DMA device library guide
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 5/7] doc: add DMA device library guide Chengwen Feng
@ 2021-09-03 15:13     ` Kevin Laatz
  0 siblings, 0 replies; 339+ messages in thread
From: Kevin Laatz @ 2021-09-03 15:13 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 02/09/2021 14:13, Chengwen Feng wrote:
> This patch adds the dmadev library guide.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Conor Walsh <conor.walsh@intel.com>
> ---
>   MAINTAINERS                          |   1 +
>   doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++++++
>   doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++++++++++
>   doc/guides/prog_guide/index.rst      |   1 +
>   4 files changed, 410 insertions(+)
>   create mode 100644 doc/guides/prog_guide/dmadev.rst
>   create mode 100644 doc/guides/prog_guide/img/dmadev.svg
>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-09-03 15:14     ` Kevin Laatz
  2021-09-04  7:17       ` fengchengwen
  2021-09-03 15:36     ` Conor Walsh
  1 sibling, 1 reply; 339+ messages in thread
From: Kevin Laatz @ 2021-09-03 15:14 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 02/09/2021 14:13, Chengwen Feng wrote:
> The skeleton dmadev driver, along the lines of the rawdev skeleton, is for
> showcasing the dmadev library.
>
> The design of the skeleton involves a virtual device which is plugged into
> the VDEV bus on initialization.
>
> Also, enable compilation of the dmadev skeleton driver.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
>   MAINTAINERS                            |   1 +
>   drivers/dma/meson.build                |  11 +
>   drivers/dma/skeleton/meson.build       |   7 +
>   drivers/dma/skeleton/skeleton_dmadev.c | 601 +++++++++++++++++++++++++++++++++
>   drivers/dma/skeleton/skeleton_dmadev.h |  59 ++++
>   drivers/dma/skeleton/version.map       |   3 +
>   drivers/meson.build                    |   1 +
>   7 files changed, 683 insertions(+)
>   create mode 100644 drivers/dma/meson.build
>   create mode 100644 drivers/dma/skeleton/meson.build
>   create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
>   create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
>   create mode 100644 drivers/dma/skeleton/version.map
>
<snip>
> diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
> new file mode 100644
> index 0000000..7033062
> --- /dev/null
> +++ b/drivers/dma/skeleton/skeleton_dmadev.c
> @@ -0,0 +1,601 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + */
> +
> +#include <errno.h>
> +#include <inttypes.h>
> +#include <stdio.h>
> +#include <stdbool.h>
> +#include <stdint.h>
> +#include <string.h>
> +
> +#include <rte_bus_vdev.h>
> +#include <rte_common.h>
> +#include <rte_cycles.h>
> +#include <rte_debug.h>
> +#include <rte_dev.h>
> +#include <rte_eal.h>
> +#include <rte_kvargs.h>
> +#include <rte_lcore.h>
> +#include <rte_log.h>
> +#include <rte_malloc.h>
> +#include <rte_memory.h>
> +#include <rte_memcpy.h>
> +#include <rte_ring.h>
> +

This list of includes is very long; many of these are likely included 
via rte_common.h already, for example. Please check this and remove any 
redundant includes.

> +#include <rte_dmadev_pmd.h>
> +
> +#include "skeleton_dmadev.h"
> +

<snip>


>
> +
> +static int
> +vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
> +{
> +	struct skeldma_desc *desc;
> +	struct rte_ring *empty;
> +	struct rte_ring *pending;
> +	struct rte_ring *running;
> +	struct rte_ring *completed;
> +	uint16_t i;
> +
> +	desc = rte_zmalloc_socket("dma_skelteon_desc",
> +				  nb_desc * sizeof(struct skeldma_desc),
> +				  RTE_CACHE_LINE_SIZE, hw->socket_id);
> +	if (desc == NULL) {
> +		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
> +		return -ENOMEM;
> +	}
> +
> +	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
> +				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
> +	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
> +				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
> +	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
> +				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
> +	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
> +				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
> +	if (empty == NULL || pending == NULL || running == NULL ||
> +	    completed == NULL) {
> +		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
> +		rte_ring_free(empty);
> +		rte_ring_free(pending);
> +		rte_ring_free(running);
> +		rte_ring_free(completed);
> +		rte_free(desc);

These pointers should be set to NULL after free'ing, similar to what you 
have in "vchan_release()".


> +		return -ENOMEM;
> +	}
> +
> +	/* The real usable ring size is *count-1* instead of *count* to
> +	 * differentiate a free ring from an empty ring.
> +	 * @see rte_ring_create
> +	 */
> +	for (i = 0; i < nb_desc - 1; i++)
> +		(void)rte_ring_enqueue(empty, (void *)(desc + i));
> +
> +	hw->desc_mem = desc;
> +	hw->desc_empty = empty;
> +	hw->desc_pending = pending;
> +	hw->desc_running = running;
> +	hw->desc_completed = completed;
> +
> +	return 0;
> +}
> +
> +static void
> +vchan_release(struct skeldma_hw *hw)
> +{
> +	if (hw->desc_mem == NULL)
> +		return;
> +
> +	rte_free(hw->desc_mem);
> +	hw->desc_mem = NULL;
> +	rte_ring_free(hw->desc_empty);
> +	hw->desc_empty = NULL;
> +	rte_ring_free(hw->desc_pending);
> +	hw->desc_pending = NULL;
> +	rte_ring_free(hw->desc_running);
> +	hw->desc_running = NULL;
> +	rte_ring_free(hw->desc_completed);
> +	hw->desc_completed = NULL;
> +}
> +
>
<snip>

With the minor comments above addressed,

Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>




^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test Chengwen Feng
  2021-09-02 14:11     ` Walsh, Conor
@ 2021-09-03 15:14     ` Kevin Laatz
  1 sibling, 0 replies; 339+ messages in thread
From: Kevin Laatz @ 2021-09-03 15:14 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 02/09/2021 14:13, Chengwen Feng wrote:
> This patch adds a dmadev API test which is based on the 'dma_skeleton' vdev.
> The test cases can be executed using the 'dmadev_autotest' command in the
> test framework.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> ---
>   MAINTAINERS                |   1 +
>   app/test/meson.build       |   4 +
>   app/test/test_dmadev.c     |  45 ++++
>   app/test/test_dmadev_api.c | 532 +++++++++++++++++++++++++++++++++++++++++++++
>   4 files changed, 582 insertions(+)
>   create mode 100644 app/test/test_dmadev.c
>   create mode 100644 app/test/test_dmadev_api.c
>
<snip>


> +
> +REGISTER_TEST_COMMAND(dmadev_autotest, test_dmadev);
> diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
> new file mode 100644
> index 0000000..a7795eb
> --- /dev/null
> +++ b/app/test/test_dmadev_api.c
> @@ -0,0 +1,532 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + */
> +
> +#include <stdint.h>
> +#include <string.h>
> +
> +#include <rte_common.h>
> +#include <rte_cycles.h>
> +#include <rte_malloc.h>
> +#include <rte_test.h>
> +#include <rte_dmadev.h>
> +
> +extern int test_dmadev_api(uint16_t dev_id);
> +
> +#define SKELDMA_TEST_RUN(test) \
> +	testsuite_run_test(test, #test)
> +
> +#define TEST_MEMCPY_SIZE	1024
> +#define TEST_WAIT_US_VAL	50000
> +
> +#define TEST_SUCCESS 0
> +#define TEST_FAILED  -1
> +
> +static uint16_t test_dev_id;
> +static uint16_t invalid_dev_id;
> +
> +static int total;
> +static int passed;
> +static int failed;
> +static char *src;
> +static char *dst;
> +
> +static int
> +testsuite_setup(uint16_t dev_id)
> +{
> +	test_dev_id = dev_id;
> +	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
> +
> +	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
> +	if (src == NULL)
> +		return -ENOMEM;
> +	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
> +	if (dst == NULL)
> +		return -ENOMEM;
> +
> +	total = 0;
> +	passed = 0;
> +	failed = 0;
> +
> +	return 0;
> +}
> +
> +static void
> +testsuite_teardown(void)
> +{
> +	rte_free(src);
> +	rte_free(dst);

These should be set to NULL after free.
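
A minimal sketch of the suggested change (src and dst are the file-scope pointers allocated in testsuite_setup()):

	static void
	testsuite_teardown(void)
	{
		rte_free(src);
		src = NULL; /* clear file-scope pointers to avoid dangling reuse */
		rte_free(dst);
		dst = NULL;
		/* Ensure the dmadev is stopped. */
		rte_dmadev_stop(test_dev_id);
	}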


> +	/* Ensure the dmadev is stopped. */
> +	rte_dmadev_stop(test_dev_id);
> +}
> +

<snip>


> +
> +int
> +test_dmadev_api(uint16_t dev_id)
> +{
> +	int ret = testsuite_setup(dev_id);
> +	if (ret) {

If testsuite setup fails, src/dst potentially need to be free'd, so 
"testsuite_teardown()" should be called here.


> +		printf("testsuite setup fail!\n");
> +		return -1;
> +	}
> +
> +	/* If the testcase exit successfully, ensure that the test dmadev exist
> +	 * and the dmadev is in the stopped state.
> +	 */
> +	SKELDMA_TEST_RUN(test_dmadev_get_dev_id);
> +	SKELDMA_TEST_RUN(test_dmadev_is_valid_dev);
> +	SKELDMA_TEST_RUN(test_dmadev_count);
> +	SKELDMA_TEST_RUN(test_dmadev_info_get);
> +	SKELDMA_TEST_RUN(test_dmadev_configure);
> +	SKELDMA_TEST_RUN(test_dmadev_vchan_setup);
> +	SKELDMA_TEST_RUN(test_dmadev_start_stop);
> +	SKELDMA_TEST_RUN(test_dmadev_stats);
> +	SKELDMA_TEST_RUN(test_dmadev_dump);
> +	SKELDMA_TEST_RUN(test_dmadev_completed);
> +	SKELDMA_TEST_RUN(test_dmadev_completed_status);
> +
> +	testsuite_teardown();
> +
> +	printf("Total tests   : %d\n", total);
> +	printf("Passed        : %d\n", passed);
> +	printf("Failed        : %d\n", failed);
> +
> +	if (failed)
> +		return -1;
> +
> +	return 0;
> +};

With the above comments addressed,

Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>



^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation
  2021-09-03 15:13     ` Kevin Laatz
@ 2021-09-03 15:30       ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-09-03 15:30 UTC (permalink / raw)
  To: Kevin Laatz
  Cc: Chengwen Feng, thomas, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh

On Fri, Sep 03, 2021 at 04:13:41PM +0100, Kevin Laatz wrote:
> On 02/09/2021 14:13, Chengwen Feng wrote:
> > +++ b/lib/dmadev/rte_dmadev.c
> > @@ -0,0 +1,614 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2021 HiSilicon Limited.
> > + * Copyright(c) 2021 Intel Corporation.
> > + */
> > +
> > +#include <ctype.h>
> > +#include <inttypes.h>
> > +#include <stdint.h>
> > +#include <stdio.h>
> > +#include <stdlib.h>
> > +#include <string.h>
> > +
> > +#include <rte_debug.h>
> > +#include <rte_dev.h>
> > +#include <rte_eal.h>
> > +#include <rte_errno.h>
> > +#include <rte_lcore.h>
> > +#include <rte_log.h>
> > +#include <rte_memory.h>
> > +#include <rte_memzone.h>
> > +#include <rte_malloc.h>
> > +#include <rte_string_fns.h>
> > +
> > +#include "rte_dmadev.h"
> > +#include "rte_dmadev_pmd.h"
> > +
> 
> Many of these includes can be removed from this file, as they are already
> included elsewhere (e.g. rte_common.h via rte_dmadev.h).
> 
> For example, you could remove: ctype.h, stdint.h, stdlib.h, rte_errno.h,
> rte_lcore.h, rte_memory.h, rte_malloc.h, rte_dev...
> 
> Please run test-meson-builds.sh after removing to make sure there are no
> missing dependencies.
> 
> <snip>
> 
Actually, first the rte_dmadev.h header should probably have excess headers
stripped. Doing quick compilation testing, I still get ok builds from the
patchset with "rte_common.h", "rte_dev.h", "rte_errno.h" and "rte_memory.h"
removed. On the other hand, for completeness, I believe "stdint.h" should
be added for the "uint*_t" types, leaving the top-of-file header includes
as:

#include <stdint.h>      /* for uint* types */
#include <rte_compat.h>  /* for __rte_experimental */

Note: this is not from comprehensive tests, just quick builds and looking at
the code.

Regards,
/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
                       ` (2 preceding siblings ...)
  2021-09-03 15:13     ` Kevin Laatz
@ 2021-09-03 15:35     ` Conor Walsh
  3 siblings, 0 replies; 339+ messages in thread
From: Conor Walsh @ 2021-09-03 15:35 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev


> The 'dmadevice' is a generic type of DMA device.
>
> This patch introduces the 'dmadevice' public APIs which expose generic
> operations that can enable configuration and I/O with the DMA devices.
>
> Maintainers update is also included in this patch.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
> ---
<snip>
> +
> +/**
> + * rte_dmadev_stats - running statistics.
> + */
> +struct rte_dmadev_stats {
> +	uint64_t submitted;
> +	/**< Count of operations which were submitted to hardware. */
> +	uint64_t completed;
> +	/**< Count of operations which were completed. */
> +	uint64_t errors;
> +	/**< Count of operations which failed to complete. */
> +};

Please make it clear that "completed" is the total number of completed 
operations, including any failures.

<snip>

Reviewed-by: Conor Walsh <conor.walsh@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 2/7] dmadev: introduce DMA device library internal header
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
  2021-09-03 15:13     ` Kevin Laatz
@ 2021-09-03 15:35     ` Conor Walsh
  1 sibling, 0 replies; 339+ messages in thread
From: Conor Walsh @ 2021-09-03 15:35 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev


> This patch introduces the DMA device library internal header, which contains
> internal data types that are used by the DMA devices in order to expose
> their ops to the class.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---

<snip>

Reviewed-by: Conor Walsh <conor.walsh@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 3/7] dmadev: introduce DMA device library PMD header
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
  2021-09-03 15:13     ` Kevin Laatz
@ 2021-09-03 15:35     ` Conor Walsh
  1 sibling, 0 replies; 339+ messages in thread
From: Conor Walsh @ 2021-09-03 15:35 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev


> This patch introduces the DMA device library PMD header, which provides the
> driver-facing APIs for a DMA device.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---

<snip>

Reviewed-by: Conor Walsh <conor.walsh@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
  2021-09-03 15:13     ` Kevin Laatz
@ 2021-09-03 15:35     ` Conor Walsh
  2021-09-04  8:52       ` fengchengwen
  1 sibling, 1 reply; 339+ messages in thread
From: Conor Walsh @ 2021-09-03 15:35 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev


> This patch introduces the DMA device library implementation, which includes
> configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> ---
<snip>
> +
> +static int
> +dmadev_shared_data_prepare(void)
> +{
> +	const struct rte_memzone *mz;
> +
> +	if (dmadev_shared_data == NULL) {
> +		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
> +			/* Allocate port data and ownership shared memory. */
> +			mz = rte_memzone_reserve(mz_rte_dmadev_data,
> +					 sizeof(*dmadev_shared_data),
> +					 rte_socket_id(), 0);
> +		} else
> +			mz = rte_memzone_lookup(mz_rte_dmadev_data);
> +		if (mz == NULL)
> +			return -ENOMEM;

This memzone is not free'd anywhere in the library; I would suggest 
freeing it as part of the PMD release function.

<snip>

Reviewed-by: Conor Walsh <conor.walsh@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver
  2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
  2021-09-03 15:14     ` Kevin Laatz
@ 2021-09-03 15:36     ` Conor Walsh
  1 sibling, 0 replies; 339+ messages in thread
From: Conor Walsh @ 2021-09-03 15:36 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev


> The skeleton dmadev driver, along the lines of the rawdev skeleton, is for
> showcasing the dmadev library.
>
> The design of the skeleton involves a virtual device which is plugged into
> the VDEV bus on initialization.
>
> Also, enable compilation of the dmadev skeleton driver.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> ---
<snip>
> +/* Count of instances */
> +static uint16_t skeldma_init_once;

Either the comment for this line or the variable name needs to change, as 
they do not line up.

I would suggest:

/* Count of instances, currently only 1 is supported. */

static uint16_t skeldma_count;


Reviewed-by: Conor Walsh <conor.walsh@intel.com>


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test
  2021-09-03  0:39       ` fengchengwen
@ 2021-09-03 15:38         ` Walsh, Conor
  2021-09-04  7:22           ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Walsh, Conor @ 2021-09-03 15:38 UTC (permalink / raw)
  To: fengchengwen, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin, Laatz, Kevin


> This is a tradeoff point. If we change the log level of dmadev, it is difficult to
> know where a test case fails.
> 
> So I prefer to add more meaningful information, at least printing out the function
> name.
> 
> And v19 adds the function name to the log format by default, so the rte_dmadev logs
> will look like:
> 
> rte_dmadev_configure(): Invalid dev_id=64
> rte_dmadev_configure(): Device 4 configure zero vchans
> rte_dmadev_configure(): Device 4 configure too many vchans
> rte_dmadev_configure(): Device 4 don't support silent
> test_dmadev_configure Passed
> rte_dmadev_vchan_setup(): Invalid dev_id=64
> rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
> rte_dmadev_vchan_setup(): Device 4 vchan out range!
> rte_dmadev_vchan_setup(): Device 4 direction invalid!
> rte_dmadev_vchan_setup(): Device 4 direction invalid!
> rte_dmadev_vchan_setup(): Device 4 don't support mem2dev transfer
> rte_dmadev_vchan_setup(): Device 4 don't support dev2mem transfer
> rte_dmadev_vchan_setup(): Device 4 don't support dev2dev transfer
> rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
> rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
> rte_dmadev_vchan_setup(): Device 4 source port type invalid
> rte_dmadev_vchan_setup(): Device 4 destination port type invalid

I thought it would be cleaner; could you suppress the output just before and re-enable it after the negative testing?
It's a lot of output to print to a user; in the driver tests, extra information is only printed in the case
of an error or failure. The line of code shown below will only suppress output from the dmadev lib, not
EAL or the test.

I don’t have a very strong opinion either way; I just wanted to improve usability.

With or without this change/cleanup:

Reviewed-by: Conor Walsh <conor.walsh@intel.com>

> 
> 
> On 2021/9/2 22:11, Walsh, Conor wrote:
> > Hi Chengwen,
> >
> > The output from the API tests is not very straightforward to interpret if you
> are not familiar with these tests.
> > Could we change the log level of the dmadev library before and after the
> API tests using something similar to
> > the code I have included inline below?
> >
> >> +static int
> >> +testsuite_setup(uint16_t dev_id)
> >> +{
> >> +	test_dev_id = dev_id;
> >> +	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
> >> +
> >> +	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
> >> +	if (src == NULL)
> >> +		return -ENOMEM;
> >> +	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
> >> +	if (dst == NULL)
> >> +		return -ENOMEM;
> >
> > 	/* Set dmadev log level to critical to suppress unnecessary output
> during API tests. */
> > 	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
> >> +
> >> +	total = 0;
> >> +	passed = 0;
> >> +	failed = 0;
> >> +
> >> +	return 0;
> >> +}
> >> +
> >> +static void
> >> +testsuite_teardown(void)
> >> +{
> >> +	rte_free(src);
> >> +	rte_free(dst);
> >> +	/* Ensure the dmadev is stopped. */
> >> +	rte_dmadev_stop(test_dev_id);
> > 	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
> >> +}
> >
> > This change would bring your output down from:
> >
> > ### Test dmadev infrastructure using skeleton driver
> > test_dmadev_get_dev_id Passed
> > test_dmadev_is_valid_dev Passed
> > test_dmadev_count Passed
> > Invalid dev_id=64
> > test_dmadev_info_get Passed
> > Invalid dev_id=64
> > Device 1 configure zero vchans
> > Device 1 configure too many vchans
> > Device 1 don't support silent
> > test_dmadev_configure Passed
> > Invalid dev_id=64
> > Device 1 number of descriptors invalid
> > Device 1 vchan out range!
> > Device 1 direction invalid!
> > Device 1 direction invalid!
> > Device 1 don't support mem2dev transfer
> > Device 1 don't support dev2mem transfer
> > Device 1 don't support dev2dev transfer
> > Device 1 number of descriptors invalid
> > Device 1 number of descriptors invalid
> > Device 1 source port type invalid
> > Device 1 destination port type invalid
> > test_dmadev_vchan_setup Passed
> > Invalid dev_id=64
> > Invalid dev_id=64
> > Device 1 must be stopped to allow configuration
> > Device 1 must be stopped to allow configuration
> > test_dmadev_start_stop Passed
> > Invalid dev_id=64
> > Invalid dev_id=64
> > Invalid dev_id=64
> > Device 1 vchan 1 out of range
> > Device 1 vchan 1 out of range
> > test_dmadev_stats Passed
> > test_dmadev_completed Passed
> > test_dmadev_completed_status Passed
> > Device 1 already stopped
> > Total tests   : 10
> > Passed        : 10
> > Failed        : 0
> > skeldma_remove(): Remove dma_skeleton dmadev
> >
> > To:
> >
> > ### Test dmadev infrastructure using skeleton driver
> > test_dmadev_get_dev_id Passed
> > test_dmadev_is_valid_dev Passed
> > test_dmadev_count Passed
> > test_dmadev_info_get Passed
> > test_dmadev_configure Passed
> > test_dmadev_vchan_setup Passed
> > test_dmadev_start_stop Passed
> > test_dmadev_stats Passed
> > test_dmadev_completed Passed
> > test_dmadev_completed_status Passed
> > Total tests   : 10
> > Passed        : 10
> > Failed        : 0
> > skeldma_remove(): Remove dma_skeleton dmadev
> >
> > Thanks,
> > Conor.
> > .
> >

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-03 11:42     ` Gagandeep Singh
@ 2021-09-04  1:31       ` fengchengwen
  2021-09-06  6:48         ` Gagandeep Singh
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-09-04  1:31 UTC (permalink / raw)
  To: Gagandeep Singh, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, Nipun Gupta, Hemant Agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 2021/9/3 19:42, Gagandeep Singh wrote:
> Hi,
> 
> <snip>
>> +
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Close a DMA device.
>> + *
>> + * The device cannot be restarted after this call.
>> + *
>> + * @param dev_id
>> + *   The identifier of the device.
>> + *
>> + * @return
>> + *   0 on success. Otherwise negative value is returned.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dmadev_close(uint16_t dev_id);
>> +
>> +/**
>> + * rte_dma_direction - DMA transfer direction defines.
>> + */
>> +enum rte_dma_direction {
>> +	RTE_DMA_DIR_MEM_TO_MEM,
>> +	/**< DMA transfer direction - from memory to memory.
>> +	 *
>> +	 * @see struct rte_dmadev_vchan_conf::direction
>> +	 */
>> +	RTE_DMA_DIR_MEM_TO_DEV,
>> +	/**< DMA transfer direction - from memory to device.
>> +	 * In a typical scenario, the SoCs are installed on host servers as
>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>> +	 * EP(endpoint) mode, it could initiate a DMA move request from
>> memory
>> +	 * (which is SoCs memory) to device (which is host memory).
>> +	 *
>> +	 * @see struct rte_dmadev_vchan_conf::direction
>> +	 */
>> +	RTE_DMA_DIR_DEV_TO_MEM,
>> +	/**< DMA transfer direction - from device to memory.
>> +	 * In a typical scenario, the SoCs are installed on host servers as
>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
>> +	 * (which is host memory) to memory (which is SoCs memory).
>> +	 *
>> +	 * @see struct rte_dmadev_vchan_conf::direction
>> +	 */
>> +	RTE_DMA_DIR_DEV_TO_DEV,
>> +	/**< DMA transfer direction - from device to device.
>> +	 * In a typical scenario, the SoCs are installed on host servers as
>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
>> +	 * (which is host memory) to the device (which is another host memory).
>> +	 *
>> +	 * @see struct rte_dmadev_vchan_conf::direction
>> +	 */
>> +};
>> +
>> +/**
>> ..
> The enum rte_dma_direction must have a member RTE_DMA_DIR_ANY for a channel that supports all 4 directions.

We've discussed this issue before. The earliest solution was to set up channels to support multiple DIRs, but
no hardware/driver actually used this (at least at that time); they (like octeontx2_dma/dpaa) all set up one
logical channel serving a single transfer direction.

So, do you have that kind of requirement for your driver?


If there is a strong need, we'll consider the following options:

Once the channel is set up, there are no other parameters to indicate a copy request's transfer direction,
so I think it is not enough to define RTE_DMA_DIR_ANY alone.

Maybe we could add RTE_DMA_OP_xxx macros (RTE_DMA_OP_FLAG_M2M/M2D/D2M/D2D); these macros would be passed
in the flags parameter of the enqueue APIs, so the enqueue API knows which transfer direction each request
corresponds to.

We can easily expand the existing framework as follows:
a. define a capability RTE_DMADEV_CAPA_DIR_ANY, which devices supporting this feature could declare.
b. define a direction macro: RTE_DMA_DIR_ANY.
c. define op flags RTE_DMA_OP_FLAG_DIR_M2M/M2D/D2M/D2D, which would be passed in the flags parameter.

A driver which doesn't support this feature simply doesn't declare it; the framework ensures that
RTE_DMA_DIR_ANY is not passed down, and the driver can ignore the RTE_DMA_OP_FLAG_DIR_xxx flags in the
enqueue APIs.

For a driver which supports this feature, the application could create a channel with RTE_DMA_DIR_ANY or
RTE_DMA_DIR_MEM_TO_MEM. If created with RTE_DMA_DIR_ANY, the RTE_DMA_OP_FLAG_DIR_xxx flags must be honored
by the driver; if created with RTE_DMA_DIR_MEM_TO_MEM, they can be ignored. A rough sketch follows.
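
A rough sketch of how option c. might look from the application side (RTE_DMA_DIR_ANY and the
RTE_DMA_OP_FLAG_DIR_xxx names are only proposals in this thread, not an agreed API):

	/* vchan was set up with conf.direction = RTE_DMA_DIR_ANY (proposed) */
	int ret = rte_dmadev_copy(dev_id, vchan, src_iova, dst_iova, length,
				  RTE_DMA_OP_FLAG_DIR_M2D /* proposed per-op direction */);
	if (ret >= 0)
		(void)rte_dmadev_submit(dev_id, vchan);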


> <snip>
> 
> 
> Regards,
> Gagan
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-03 13:03     ` Bruce Richardson
@ 2021-09-04  3:05       ` fengchengwen
  2021-09-04 10:10       ` Morten Brørup
  1 sibling, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-04  3:05 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 2021/9/3 21:03, Bruce Richardson wrote:
> On Thu, Sep 02, 2021 at 09:13:09PM +0800, Chengwen Feng wrote:
>> The 'dmadevice' is a generic type of DMA device.
>>
>> This patch introduces the 'dmadevice' public APIs which expose generic
>> operations that can enable configuration and I/O with the DMA devices.
>>
>> Maintainers update is also included in this patch.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
>> ---
>>  MAINTAINERS                            |   4 +
>>  doc/api/doxy-api-index.md              |   1 +
>>  doc/api/doxy-api.conf.in               |   1 +
>>  doc/guides/rel_notes/release_21_11.rst |   5 +
>>  lib/dmadev/meson.build                 |   4 +
>>  lib/dmadev/rte_dmadev.h                | 949 +++++++++++++++++++++++++++++++++
>>  lib/dmadev/version.map                 |  24 +
>>  lib/meson.build                        |   1 +
>>  8 files changed, 989 insertions(+)
>>  create mode 100644 lib/dmadev/meson.build
>>  create mode 100644 lib/dmadev/rte_dmadev.h
>>  create mode 100644 lib/dmadev/version.map
>>
> 
> <snip>
> 
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Trigger hardware to begin performing enqueued operations.
>> + *
>> + * This API is used to write the "doorbell" to the hardware to trigger it
>> + * to begin the operations previously enqueued by rte_dmadev_copy/fill().
>> + *
>> + * @param dev_id
>> + *   The identifier of the device.
>> + * @param vchan
>> + *   The identifier of virtual DMA channel.
>> + *
>> + * @return
>> + *   0 on success. Otherwise negative value is returned.
>> + */
>> +__rte_experimental
>> +int
>> +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
>> +
> 
> Putting this out here for discussion:
> 
> Those developers here looking at how integration of dma acceleration into
> vhost-virtio e.g. for OVS use, have come back with the request that we
> provide a method for querying the amount of space in the descriptor ring,
> or the size of the next burst, or similar. Basically, the reason for the
> ask is to allow an app to determine if a set of jobs of size N can be
> enqueued before the first one is, so that we don't get a half-offload of
> copy of a multi-segment packet (for devices where scatter-gather is not
> available).

Agree

> 
> In our "ioat" rawdev driver, we did this by providing a "burst_capacity"
> API which returned the number of elements which could be enqueued in the
> next burst without error (normally the available ring space). Looking at
> the dmadev APIs, an alternative way to do this is to extend the "submit()"
> function to allow a 3rd optional parameter to return this info. That is,
> when submitting one burst of operations, you get info about how many more
> you can enqueue in the next burst. [For submitting packets via the submit
> flag, this info would not be available, as I feel ending all enqueue
> operations would be excessive].
> 
> Therefore, I see a number of options for us to meet the ask for space
> querying API:
> 1. provide a capacity API as done with ioat driver
> 2. provide (optional) capacity information from each submit() call
> 3. provide both #1 and #2 above as they are compatible
> 4. <some other idea>

Maybe the available ring space could be calculated from the enqueue/completed
ring_idx values and ring_size; in this way, we only need to provide the following
helper function, e.g.
  uint16_t rte_dmadev_burst_capacity(uint16_t enqueue_idx, // ring_idx of the latest enqueue
                                     uint16_t completed_idx, // ring_idx of the latest completed
                                     uint16_t ring_size)
  {
    return ring_size - 1 - distance(enqueue_idx, completed_idx);
  }

However, this does not apply to the scatter-gather scenario, in which one
enqueue request may occupy multiple descriptors' space.

Alternatively, an sg_avg can be passed in:
  uint16_t rte_dmadev_burst_capacity(uint16_t enqueue_idx, // ring_idx of the latest enqueue
                                     uint16_t completed_idx, // ring_idx of the latest completed
                                     uint16_t ring_size,
                                     uint16_t sg_avg_descs) // average number of descriptors occupied by SG requests
  {
    return ring_size - 1 - (distance(enqueue_idx, completed_idx) * sg_avg_descs);
  }
But it's just an estimate; it may be too big or too small.

> 
> For me, I think #3 is probably the most flexible approach. The benefit of
> #2 is that the info can be provided to the application much more cheaply
> than when the app has to call a separate API (which wouldn't be on the
> fast-path). However, a way to provide the info apart from submitting a
> burst would also be helpful, hence adding the extra function too (#1).
> 
> What are other people's thoughts or ideas on this?

In terms of the API definition, the two do not seem to be related, so I do not
recommend extending submit() with this.

However, for data-plane APIs certain compromises can be accepted, I think, and it
works well in burst enqueue mode (see the sketch after this list):
    1. the application maintains a variable of available space, and initializes it
       with rte_dmadev_burst_capacity(), which is a new data-plane API.
    2. enqueue multiple copy requests without the submit flag.
       before enqueueing, the application can use step 1's available space to check
       whether all requests can be accommodated.
    3. ring the doorbell (submit) and update the available space info.
    4. do other work.
    5. call the completed API:
       if this API returns >= 0, the driver's actual available space will have
       increased; it will be bigger than the application's recorded available space.
    6. do other work.
    7. enqueue multiple copy requests without the submit flag.
       before enqueueing, the application can use step 3's available space to check
       whether all requests can be accommodated.
    8. ring the doorbell (submit) and update the available space again.
    ...

As long as ring_size is set properly (at least 2 * burst size), this scheme works.
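
A sketch of steps 1-3 in code (rte_dmadev_burst_capacity() is the API proposed in this thread,
its exact signature is still under discussion; nb_ops and the src/dst/len arrays are the
application's own):

	uint16_t avail = rte_dmadev_burst_capacity(dev_id, vchan); /* step 1 (proposed API) */
	...
	if (avail >= nb_ops) {                    /* step 2: check before enqueueing */
		for (i = 0; i < nb_ops; i++)
			rte_dmadev_copy(dev_id, vchan, src[i], dst[i], len[i], 0);
		rte_dmadev_submit(dev_id, vchan); /* step 3: ring the doorbell */
		avail -= nb_ops;                  /*         and update the local view */
	}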

> 
> Regards,
> /Bruce
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-03 12:59         ` Maxime Coquelin
@ 2021-09-04  7:02           ` fengchengwen
  2021-09-06  1:46             ` Li, Xiaoyun
  2021-09-06  2:03           ` Xia, Chenbo
  1 sibling, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-09-04  7:02 UTC (permalink / raw)
  To: Maxime Coquelin, Bruce Richardson, Li, Xiaoyun
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, konstantin.ananyev, conor.walsh,
	chenbo.xia

On 2021/9/3 20:59, Maxime Coquelin wrote:
> Hi,
> 
> On 9/2/21 3:39 PM, fengchengwen wrote:
>> Fix in v19
>>
>> I think there are many patches waiting on the dmadev framework upstream, so
>> could you help review the unreviewed patches (like dma/skeleton and app/test)?
> 
> Thanks for all the work, it looks really promising!
> 
>> Also, we have developed the dma driver for hisilicon, and the corresponding test
>> apps (like examples/vhost and testpmd) are being developed.
>>
>> examples/vhost: will be extended to support the dmadev type.
> 
> I think Sunil has posted an RFC for the Vhost lib and example; you might want
> to have a look to avoid duplicate work.

Got it, thanks

> 
>> testpmd: will introduce some dma control commands and the dma forwarding mode.
>>          the dma forwarding mode process:
>>               // 1st: call rte_eth_rx_burst
>>               // 2nd: post the received packet to dma, moving data to another packet
>>               // 3rd: set the newly-copied rte_mbuf's header
>>               // 4th: free the received packet
>>               // 5th: get the completed dma request and associate it with its rte_mbuf
>>               // 6th: send the rte_mbuf from step 5 to the nic
>>
>> @Maxime @Chenbo @Xiaoyun I'd like to hear your opinions.
> 
> We might also think of adding async support to the Vhost PMD; that would be
> another way to test dmadev with testpmd.

OK

> 
> Thanks,
> Maxime
> 
>>
>> On 2021/9/2 19:51, Bruce Richardson wrote:
>>> On Thu, Sep 02, 2021 at 06:54:17PM +0800, Chengwen Feng wrote:
>>>> This patch add myself as dmadev's maintainer and update release notes.
>>>>
>>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com> ---
>>>
>>> Just in case you are doing any further revisions of this patchset, the
>>> maintainers entry and RN entry are generally added in the first patch, so
>>> squash this patch into patch #1.
>>> .
>>>
>>
> 
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver
  2021-09-03 15:14     ` Kevin Laatz
@ 2021-09-04  7:17       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-04  7:17 UTC (permalink / raw)
  To: Kevin Laatz, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 2021/9/3 23:14, Kevin Laatz wrote:
> On 02/09/2021 14:13, Chengwen Feng wrote:
>> The skeleton dmadev driver, along the lines of the rawdev skeleton, is for
>> showcasing the dmadev library.
>>
>> The design of the skeleton involves a virtual device which is plugged into
>> the VDEV bus on initialization.
>>
>> Also, enable compilation of the dmadev skeleton driver.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> ---
>>   MAINTAINERS                            |   1 +
>>   drivers/dma/meson.build                |  11 +
>>   drivers/dma/skeleton/meson.build       |   7 +
>>   drivers/dma/skeleton/skeleton_dmadev.c | 601 +++++++++++++++++++++++++++++++++
>>   drivers/dma/skeleton/skeleton_dmadev.h |  59 ++++
>>   drivers/dma/skeleton/version.map       |   3 +
>>   drivers/meson.build                    |   1 +
>>   7 files changed, 683 insertions(+)
>>   create mode 100644 drivers/dma/meson.build
>>   create mode 100644 drivers/dma/skeleton/meson.build
>>   create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
>>   create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
>>   create mode 100644 drivers/dma/skeleton/version.map
>>

snip

>>
>> +
>> +static int
>> +vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
>> +{
>> +    struct skeldma_desc *desc;
>> +    struct rte_ring *empty;
>> +    struct rte_ring *pending;
>> +    struct rte_ring *running;
>> +    struct rte_ring *completed;
>> +    uint16_t i;
>> +
>> +    desc = rte_zmalloc_socket("dma_skelteon_desc",
>> +                  nb_desc * sizeof(struct skeldma_desc),
>> +                  RTE_CACHE_LINE_SIZE, hw->socket_id);
>> +    if (desc == NULL) {
>> +        SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
>> +        return -ENOMEM;
>> +    }
>> +
>> +    empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
>> +                hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
>> +    pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
>> +                  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
>> +    running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
>> +                  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
>> +    completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
>> +                  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
>> +    if (empty == NULL || pending == NULL || running == NULL ||
>> +        completed == NULL) {
>> +        SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
>> +        rte_ring_free(empty);
>> +        rte_ring_free(pending);
>> +        rte_ring_free(running);
>> +        rte_ring_free(completed);
>> +        rte_free(desc);
> 
> These pointers should be set to NULL after free'ing, similar to what you have in "vchan_release()".
> 

These pointers are local variables; do they need to be cleaned up?

The set-to-NULL operations in 'vchan_release' are there because those
pointers are held by the dmadev with a longer life cycle.

Thanks

> 
>> +        return -ENOMEM;
>> +    }
>> +
>> +    /* The real usable ring size is *count-1* instead of *count* to
>> +     * differentiate a free ring from an empty ring.
>> +     * @see rte_ring_create
>> +     */
>> +    for (i = 0; i < nb_desc - 1; i++)
>> +        (void)rte_ring_enqueue(empty, (void *)(desc + i));
>> +
>> +    hw->desc_mem = desc;
>> +    hw->desc_empty = empty;
>> +    hw->desc_pending = pending;
>> +    hw->desc_running = running;
>> +    hw->desc_completed = completed;
>> +
>> +    return 0;
>> +}
>> +
>> +static void
>> +vchan_release(struct skeldma_hw *hw)
>> +{
>> +    if (hw->desc_mem == NULL)
>> +        return;
>> +
>> +    rte_free(hw->desc_mem);
>> +    hw->desc_mem = NULL;
>> +    rte_ring_free(hw->desc_empty);
>> +    hw->desc_empty = NULL;
>> +    rte_ring_free(hw->desc_pending);
>> +    hw->desc_pending = NULL;
>> +    rte_ring_free(hw->desc_running);
>> +    hw->desc_running = NULL;
>> +    rte_ring_free(hw->desc_completed);
>> +    hw->desc_completed = NULL;
>> +}
>> +
>>
> <snip>
> 
> With the minor comments above addressed,
> 
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> 
> 
> 
> .

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test
  2021-09-03 15:38         ` Walsh, Conor
@ 2021-09-04  7:22           ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-04  7:22 UTC (permalink / raw)
  To: Walsh, Conor, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin, Laatz, Kevin

On 2021/9/3 23:38, Walsh, Conor wrote:
> 
>> This is a tradeoff point. If we change the log level of dmadev, it is difficult to
>> know where a test case fails.
>>
>> So I prefer to add more meaningful information, at least printing out the function
>> name.
>>
>> And v19 adds the function name to the log format by default, so the rte_dmadev logs
>> will look like:
>>
>> rte_dmadev_configure(): Invalid dev_id=64
>> rte_dmadev_configure(): Device 4 configure zero vchans
>> rte_dmadev_configure(): Device 4 configure too many vchans
>> rte_dmadev_configure(): Device 4 don't support silent
>> test_dmadev_configure Passed
>> rte_dmadev_vchan_setup(): Invalid dev_id=64
>> rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
>> rte_dmadev_vchan_setup(): Device 4 vchan out range!
>> rte_dmadev_vchan_setup(): Device 4 direction invalid!
>> rte_dmadev_vchan_setup(): Device 4 direction invalid!
>> rte_dmadev_vchan_setup(): Device 4 don't support mem2dev transfer
>> rte_dmadev_vchan_setup(): Device 4 don't support dev2mem transfer
>> rte_dmadev_vchan_setup(): Device 4 don't support dev2dev transfer
>> rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
>> rte_dmadev_vchan_setup(): Device 4 number of descriptors invalid
>> rte_dmadev_vchan_setup(): Device 4 source port type invalid
>> rte_dmadev_vchan_setup(): Device 4 destination port type invalid
> 
> I thought it would be cleaner; could you suppress the output just before and re-enable it after the negative testing?
> It's a lot of output to print to a user; in the driver tests, extra information is only printed in the case
> of an error or failure. The line of code shown below will only suppress output from the dmadev lib, not
> EAL or the test.

Yes, you're right; the UT output is enough to quickly pinpoint the error.

I will modify it using your method, thanks.

> 
> I don’t have a very strong opinion either way; I just wanted to improve usability.
> 
> With or without this change/cleanup:
> 
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> 
>>
>>
>> On 2021/9/2 22:11, Walsh, Conor wrote:
>>> Hi Chengwen,
>>>
>>> The output from the API tests is not very straightforward to interpret if you
>> are not familiar with these tests.
>>> Could we change the log level of the dmadev library before and after the
>> API tests using something similar to
>>> the code I have included inline below?
>>>
>>>> +static int
>>>> +testsuite_setup(uint16_t dev_id)
>>>> +{
>>>> +	test_dev_id = dev_id;
>>>> +	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
>>>> +
>>>> +	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
>>>> +	if (src == NULL)
>>>> +		return -ENOMEM;
>>>> +	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
>>>> +	if (dst == NULL)
>>>> +		return -ENOMEM;
>>>
>>> 	/* Set dmadev log level to critical to suppress unnecessary output
>> during API tests. */
>>> 	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
>>>> +
>>>> +	total = 0;
>>>> +	passed = 0;
>>>> +	failed = 0;
>>>> +
>>>> +	return 0;
>>>> +}
>>>> +
>>>> +static void
>>>> +testsuite_teardown(void)
>>>> +{
>>>> +	rte_free(src);
>>>> +	rte_free(dst);
>>>> +	/* Ensure the dmadev is stopped. */
>>>> +	rte_dmadev_stop(test_dev_id);
>>> 	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
>>>> +}
>>>
>>> This change would bring your output down from:
>>>
>>> ### Test dmadev infrastructure using skeleton driver
>>> test_dmadev_get_dev_id Passed
>>> test_dmadev_is_valid_dev Passed
>>> test_dmadev_count Passed
>>> Invalid dev_id=64
>>> test_dmadev_info_get Passed
>>> Invalid dev_id=64
>>> Device 1 configure zero vchans
>>> Device 1 configure too many vchans
>>> Device 1 don't support silent
>>> test_dmadev_configure Passed
>>> Invalid dev_id=64
>>> Device 1 number of descriptors invalid
>>> Device 1 vchan out range!
>>> Device 1 direction invalid!
>>> Device 1 direction invalid!
>>> Device 1 don't support mem2dev transfer
>>> Device 1 don't support dev2mem transfer
>>> Device 1 don't support dev2dev transfer
>>> Device 1 number of descriptors invalid
>>> Device 1 number of descriptors invalid
>>> Device 1 source port type invalid
>>> Device 1 destination port type invalid
>>> test_dmadev_vchan_setup Passed
>>> Invalid dev_id=64
>>> Invalid dev_id=64
>>> Device 1 must be stopped to allow configuration
>>> Device 1 must be stopped to allow configuration
>>> test_dmadev_start_stop Passed
>>> Invalid dev_id=64
>>> Invalid dev_id=64
>>> Invalid dev_id=64
>>> Device 1 vchan 1 out of range
>>> Device 1 vchan 1 out of range
>>> test_dmadev_stats Passed
>>> test_dmadev_completed Passed
>>> test_dmadev_completed_status Passed
>>> Device 1 already stopped
>>> Total tests   : 10
>>> Passed        : 10
>>> Failed        : 0
>>> skeldma_remove(): Remove dma_skeleton dmadev
>>>
>>> To:
>>>
>>> ### Test dmadev infrastructure using skeleton driver
>>> test_dmadev_get_dev_id Passed
>>> test_dmadev_is_valid_dev Passed
>>> test_dmadev_count Passed
>>> test_dmadev_info_get Passed
>>> test_dmadev_configure Passed
>>> test_dmadev_vchan_setup Passed
>>> test_dmadev_start_stop Passed
>>> test_dmadev_stats Passed
>>> test_dmadev_completed Passed
>>> test_dmadev_completed_status Passed
>>> Total tests   : 10
>>> Passed        : 10
>>> Failed        : 0
>>> skeldma_remove(): Remove dma_skeleton dmadev
>>>
>>> Thanks,
>>> Conor.
>>> .
>>>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation
  2021-09-03 15:35     ` Conor Walsh
@ 2021-09-04  8:52       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-04  8:52 UTC (permalink / raw)
  To: Conor Walsh, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev

On 2021/9/3 23:35, Conor Walsh wrote:
> 
>> This patch introduces the DMA device library implementation, which includes
>> configuration and I/O with the DMA devices.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>> ---
> <snip>
>> +
>> +static int
>> +dmadev_shared_data_prepare(void)
>> +{
>> +    const struct rte_memzone *mz;
>> +
>> +    if (dmadev_shared_data == NULL) {
>> +        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
>> +            /* Allocate port data and ownership shared memory. */
>> +            mz = rte_memzone_reserve(mz_rte_dmadev_data,
>> +                     sizeof(*dmadev_shared_data),
>> +                     rte_socket_id(), 0);
>> +        } else
>> +            mz = rte_memzone_lookup(mz_rte_dmadev_data);
>> +        if (mz == NULL)
>> +            return -ENOMEM;
> 
> This memzone is not freed anywhere in the library; I would suggest freeing it as part of the PMD release function.

All dmadev data lives in one contiguous area; when one PMD is released,
we cannot free just its part of that area.

This pattern is also common in other libs, so I recommend keeping it as
it is.
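
For illustration, under this design a PMD release path would only clear
the per-device slot and never free the shared memzone (a sketch, not the
exact library code):

	int
	rte_dmadev_pmd_release(struct rte_dmadev *dev)
	{
		/* Clear the slot for reuse; the memzone backing
		 * dmadev_shared_data stays allocated because it holds
		 * the data of all devices.
		 */
		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
		dev->state = RTE_DMADEV_UNUSED;
		return 0;
	}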

Thanks

> 
> <snip>
> 
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> 
> .

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v20 0/7] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (22 preceding siblings ...)
  2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
@ 2021-09-04 10:10 ` Chengwen Feng
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (7 more replies)
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
                   ` (5 subsequent siblings)
  29 siblings, 8 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-04 10:10 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch set contains seven patches that add the new dmadev library.

Chengwen Feng (7):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test

---
v20:
* delete unnecessary and duplicate include header files.
* add a conf_sz parameter to the configure and vchan-setup callbacks
  of the PMD; this is mainly used to enhance ABI compatibility.
* rearrange the rte_dmadev structure fields to reserve more space for
  I/O functions.
* fix some ambiguous and unnecessary comments.
* fix a potential memory leak in the unit tests.
* rename skeldma_init_once to skeldma_count.
* suppress rte_dmadev error output when executing unit tests.
v19:
* squash maintainer patch to patch #1.
v18:
* add BUS_READ/WRITE_ERR and PAGE_FAULT to the RTE_DMA_STATUS_* codes.
* dataplane APIs now check dev_started when debug is enabled.
* rte_dmadev_start/vchan_setup now check that the device is configured.
* rte_dmadev_dump now formats capability names.
* optimize the comments of rte_dmadev.
* fix skeldma_copy always returning zero on successful enqueue.
* add newline characters to the log encapsulation macros.
* add a rte_dmadev_dump() unit test to test_dmadev_api.
v17:
* remove the rte_dmadev_selftest() API.
* move the dmadev API test from dma/skeleton to app/test.
* fix a compile error in the dma/skeleton driver when building for x86-32.
* fix the iol spell check warning in dmadev.rst.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   43 +
 app/test/test_dmadev_api.c             |  543 ++++++++++++
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/prog_guide/dmadev.rst       |  125 +++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    5 +
 drivers/dma/meson.build                |   11 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  594 ++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   61 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  607 ++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1047 ++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  183 +++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 ++
 lib/dmadev/version.map                 |   35 +
 lib/meson.build                        |    1 +
 24 files changed, 3645 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v20 1/7] dmadev: introduce DMA device library public APIs
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
@ 2021-09-04 10:10   ` Chengwen Feng
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (6 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-04 10:10 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs, which expose generic
operations that can enable configuration and I/O with the DMA devices.

Maintainers update is also included in this patch.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   4 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/rel_notes/release_21_11.rst |   5 +
 lib/dmadev/meson.build                 |   4 +
 lib/dmadev/rte_dmadev.h                | 951 +++++++++++++++++++++++++
 lib/dmadev/version.map                 |  24 +
 lib/meson.build                        |   1 +
 8 files changed, 991 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 1e0d303394..9885cc56b7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107a03..ce08250639 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a0195c6..a44a92b5fe 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d707a554ef..78b9691bf3 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -55,6 +55,11 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provision of hardware and software DMA devices.
+
 
 Removed Items
 -------------
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000000..6d5bd85373
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000000..76d71615eb
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller may have multiple HW-DMA-channels (aka. HW-DMA-queues),
+ * each of which should be represented by a dmadev.
+ *
+ * A dmadev can expose multiple virtual DMA channels, each of which represents
+ * a different transfer context. DMA operation requests must be submitted to a
+ * virtual DMA channel. e.g. an application could create virtual DMA channel 0
+ * for the memory-to-memory transfer scenario, and virtual DMA channel 1 for
+ * the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and is
+ * released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
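+ * A minimal lifecycle sketch (illustrative only; error checks are omitted
+ * and the device name "dma0" is hypothetical):
+ *
+ * \code{.c}
+ *     int dev_id = rte_dmadev_get_dev_id("dma0");
+ *     struct rte_dmadev_conf dev_conf = { .nb_vchans = 1 };
+ *     struct rte_dmadev_vchan_conf vchan_conf = {
+ *             .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *             .nb_desc = 1024,
+ *     };
+ *
+ *     rte_dmadev_configure(dev_id, &dev_conf);
+ *     rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf);
+ *     rte_dmadev_start(dev_id);
+ *     // ... dataplane work ...
+ *     rte_dmadev_stop(dev_id);
+ *     rte_dmadev_close(dev_id);
+ * \endcode
+ *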
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively,
+ * the flags parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT)
+ * can do the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - returns the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - returns the number of operation requests completed.
+ *
+ * @note The two completed APIs also support returning the last completed
+ * operation's ring_idx.
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not invoke the above two completed APIs.
+ *
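+ * A dataplane sketch for a single copy on vchan 0 (illustrative only;
+ * 'dev_id', 'src', 'dst' and 'len' are assumed to be valid):
+ *
+ * \code{.c}
+ *     uint16_t last_idx;
+ *     bool has_error;
+ *
+ *     rte_dmadev_copy(dev_id, 0, src, dst, len, 0);
+ *     rte_dmadev_submit(dev_id, 0);
+ *     while (rte_dmadev_completed(dev_id, 0, 1, &last_idx,
+ *                                 &has_error) == 0)
+ *             ; // poll until the copy completes
+ * \endcode
+ *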
+ * Regarding the ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()), the rules are as follows:
+ *     - The ring_idx for each virtual DMA channel is independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the returned ring_idx is 0
+ *     - step-3: enqueue a copy operation again, the returned ring_idx is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the returned ring_idx is 65535
+ *     - step-x+1: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
+ *
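+ * Because ring_idx is a free-running uint16_t per virtual DMA channel, an
+ * application can use it to index its own metadata ring. A sketch (the
+ * metadata struct and cookie are hypothetical; the ring size must be a
+ * power of two no larger than 65536):
+ *
+ * \code{.c}
+ *     #define META_RING_SZ 1024
+ *     struct app_meta { void *cookie; } meta[META_RING_SZ];
+ *
+ *     int idx = rte_dmadev_copy(dev_id, 0, src, dst, len, 0);
+ *     if (idx >= 0)
+ *             meta[idx & (META_RING_SZ - 1)].cookie = my_cookie;
+ * \endcode
+ *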
+ * The DMA operation addresses used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) are defined as the rte_iova_t type.
+ * The dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free and assume they are not invoked in parallel on different logical
+ * cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation, because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the device index refers to a valid DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows a VA to be used as a DMA address.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc, or the stack.
+ * If the device does not support SVA, the application must pass an IOVA
+ * address, e.g. obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
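+
+/* For example (a sketch; 'len' is assumed): without SVA support, an
+ * application should pass IOVA addresses, which can be obtained from a
+ * virtual address with rte_malloc_virt2iova():
+ *
+ *     void *buf = rte_malloc(NULL, len, 0);
+ *     rte_iova_t dst = rte_malloc_virt2iova(buf);
+ */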
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * The ops capabilities start at bit index 32, leaving a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t nb_vchans;
+	/**< The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false: default mode, true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to a device (host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from a device
+	 * (host memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (host memory) to another device (another host's memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The pasid field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameters in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameters in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameters
+	 * in the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
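+/* Example (illustrative, with hypothetical PCIe port field values):
+ * configuring a device-to-memory virtual channel whose source is a PCIe
+ * access port:
+ *
+ *     struct rte_dmadev_vchan_conf conf = {
+ *             .direction = RTE_DMA_DIR_DEV_TO_MEM,
+ *             .nb_desc = 512,
+ *             .src_port = {
+ *                     .port_type = RTE_DMADEV_PORT_PCIE,
+ *                     .pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *             },
+ *     };
+ */
+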
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dmadev_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed;
+	/**< Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t errors;
+	/**< Count of operations which failed to complete. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+/**< Special 'vchan' value to indicate all virtual DMA channels. */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMADEV_ALL_VCHAN, all channels are covered.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMADEV_ALL_VCHAN, all channels are covered.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
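+/* Example (a sketch; assumes stats for all vchans are aggregated into one
+ * structure): read, print and reset the statistics of a device:
+ *
+ *     struct rte_dmadev_stats stats;
+ *
+ *     rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+ *     printf("submitted %" PRIu64 " completed %" PRIu64 " errors %" PRIu64 "\n",
+ *            stats.submitted, stats.completed, stats.errors);
+ *     rte_dmadev_stats_reset(dev_id, RTE_DMADEV_ALL_VCHAN);
+ */
+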
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user may modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job), which allows outstanding requests to complete as much as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation was not attempted: the jobs in a particular batch
+	 * are not attempted because they appeared after a fence where a
+	 * previous job failed. In some HW implementations it is possible for
+	 * jobs from later batches to be completed nevertheless, so the status
+	 * of the not-attempted jobs is reported before that of the newer
+	 * completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source address. */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; covers the case where only an address error is
+	 * known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/**< The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/**< The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error; covers the
+	 * case where only a bus error is known, but not its direction.
+	 */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device/device-to-memory/
+	 * device-to-device transfer scenarios.
+	 */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/**< The operation failed to complete due to a page fault during
+	 * address lookup.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * A structure used to hold scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * An operation with this flag must be processed only after all previous
+ * operations have completed.
+ * If the DMA HW works in-order (i.e. it has an implicit fence between
+ * operations), this flag may be a no-op.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * An operation with this flag issues a doorbell to the hardware after the
+ * jobs have been enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint that the DMA engine should write data to the low-level cache.
+ * Used for performance optimization; this is only a hint, there is no
+ * capability bit for it, and a driver must not return an error if this flag
+ * is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, a doorbell is triggered to begin
+ * the operation; otherwise no doorbell is triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, a
+ * doorbell is triggered to begin the operation; otherwise no doorbell is
+ * triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, a doorbell is triggered to begin
+ * the operation; otherwise no doorbell is triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if a transfer error has occurred.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; each completed
+ * operation may have succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (say, n), then the first n entries
+ *   of the status array are also set.
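+ *
+ * An illustrative polling sketch (assumes a started device and vchan 0;
+ * the failure handler is hypothetical):
+ *
+ * \code{.c}
+ *     enum rte_dma_status_code status[8];
+ *     uint16_t last_idx;
+ *     uint16_t n = rte_dmadev_completed_status(dev_id, 0, 8, &last_idx,
+ *                                              status);
+ *     for (uint16_t i = 0; i < n; i++)
+ *             if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *                     handle_failure(status[i]);
+ * \endcode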
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000000..2e37882364
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,24 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4323..a542c238d2 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v20 2/7] dmadev: introduce DMA device library internal header
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-09-04 10:10   ` Chengwen Feng
  2021-09-06 13:35     ` Bruce Richardson
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (5 subsequent siblings)
  7 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-09-04 10:10 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch introduces the DMA device library internal header, which
contains the internal data types used by DMA devices to expose their
ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 181 +++++++++++++++++++++++++++++++++++
 2 files changed, 182 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85373..f421ec1909 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000000..1ee8971c40
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains the internal data types used by the DMA devices in
+ * order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf,
+				      uint32_t conf_sz);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dmadev_vchan_conf *conf,
+				uint32_t conf_sz);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t       dev_info_get;
+	rte_dmadev_configure_t      dev_configure;
+	rte_dmadev_start_t          dev_start;
+	rte_dmadev_stop_t           dev_stop;
+	rte_dmadev_close_t          dev_close;
+
+	rte_dmadev_vchan_setup_t    vchan_setup;
+
+	rte_dmadev_stats_get_t      stats_get;
+	rte_dmadev_stats_reset_t    stats_reset;
+
+	rte_dmadev_dump_t           dev_dump;
+};
+
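+/* Example (illustrative; the skeldma_* callback names are hypothetical):
+ * a PMD typically defines a static ops table and assigns it to
+ * 'dev->dev_ops' at probe time:
+ *
+ *     static const struct rte_dmadev_ops skeldma_ops = {
+ *             .dev_info_get  = skeldma_info_get,
+ *             .dev_configure = skeldma_configure,
+ *             .dev_start     = skeldma_start,
+ *             .dev_stop      = skeldma_stop,
+ *             .dev_close     = skeldma_close,
+ *     };
+ */
+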
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from primary process, it is used by the secondary process to get
+	 * dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - In the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing code should
+	 * initialize this field and copy its value to the 'dev_private' field
+	 * of the 'struct rte_dmadev_data' pointed to by the 'data' field.
+	 *
+	 * - In the secondary process, the dmadev framework initializes this
+	 * field by copying it from the 'dev_private' field of the
+	 * 'struct rte_dmadev_data' initialized by the primary process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	rte_dmadev_copy_t             copy;
+	rte_dmadev_copy_sg_t          copy_sg;
+	rte_dmadev_fill_t             fill;
+	rte_dmadev_submit_t           submit;
+	rte_dmadev_completed_t        completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_ptr[7]; /**< Reserved for future IO function. */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info which supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v20 3/7] dmadev: introduce DMA device library PMD header
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-09-04 10:10   ` Chengwen Feng
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (4 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-04 10:10 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch introduces the DMA device library PMD header, which provides
the driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 ++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1909..833baf7d54 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 76d71615eb..c8dd0009f5 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -730,6 +730,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000000..45141f9dc1
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
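+/* Illustrative probe flow for a PMD (a sketch; 'my_dma_ops', 'priv' and
+ * the device name are hypothetical):
+ *
+ *     struct rte_dmadev *dev = rte_dmadev_pmd_allocate("dma_mydev");
+ *     if (dev == NULL)
+ *             return -ENOMEM;
+ *     dev->dev_ops = &my_dma_ops;
+ *     dev->dev_private = priv;
+ *     dev->data->dev_private = priv;
+ */
+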
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 2e37882364..d027eeac97 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -22,3 +22,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+	global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v20 4/7] dmadev: introduce DMA device library implementation
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-09-04 10:10   ` Chengwen Feng
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 5/7] doc: add DMA device library guide Chengwen Feng
                     ` (3 subsequent siblings)
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-04 10:10 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch introduces the DMA device library implementation, which
includes configuration and I/O with DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 607 +++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 720 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c07d..331a431819 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7d54..d2fc85e8c7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000000..ee8db9aaca
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,607 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <inttypes.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate port data and ownership shared memory. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Failed to get device %u info", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configured with zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configured with too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u doesn't support silent mode", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dmadev_conf));
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf,
+		       sizeof(struct rte_dmadev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+			dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Failed to get device %u info", dev_id);
+		return -EINVAL;
+	}
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+			dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= info.nb_vchans) {
+		RTE_DMADEV_LOG(ERR, "Device %u vchan out of range!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dmadev_vchan_conf));
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dmadev_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMADEV_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMADEV_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMADEV_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMADEV_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMADEV_CAPA_SVA,         "sva"     },
+		{ RTE_DMADEV_CAPA_SILENT,      "silent"  },
+		{ RTE_DMADEV_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMADEV_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMADEV_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dmadev_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		fprintf(f, " %s", dmadev_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	fprintf(f, "\n");
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Failed to get device %u info", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	dmadev_dump_capability(f, info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index c8dd0009f5..3cb95fe31a 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -787,9 +787,21 @@ struct rte_dmadev_sge {
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -825,10 +837,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -860,9 +885,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -882,8 +919,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   0 on success. Otherwise negative value is returned.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -909,9 +958,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -941,10 +1018,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 1ee8971c40..32618b020c 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -178,4 +178,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index d027eeac97..80be592713 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -26,6 +26,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v20 5/7] doc: add DMA device library guide
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-09-04 10:10   ` Chengwen Feng
  2021-09-04 10:17     ` Jerin Jacob
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
                     ` (2 subsequent siblings)
  7 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-09-04 10:10 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds the dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Conor Walsh <conor.walsh@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
 MAINTAINERS                          |   1 +
 doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++
 doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst      |   1 +
 4 files changed, 410 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg

diff --git a/MAINTAINERS b/MAINTAINERS
index 9885cc56b7..e237e9406b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -499,6 +499,7 @@ F: doc/guides/prog_guide/rawdev.rst
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
 
 
 Memory Pool Drivers
diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000000..e47a164850
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices, as well as a generic DMA API which allows
+DMA devices to be managed and configured, and through which DMA operations
+can be provisioned on a DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (also known
+   as hardware DMA queues); each hardware DMA channel should be represented
+   by a dmadev.
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+   channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. For example, an application
+   could create virtual DMA channel 0 for memory-to-memory transfers and
+   virtual DMA channel 1 for memory-to-device transfers.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI BDF
+(bus/bridge, device, function). Specific physical DMA controllers, like other
+physical devices in DPDK, can be listed using the EAL command line options.
+
+The dmadevs are dynamically allocated using the ``rte_dmadev_pmd_allocate``
+API, based on the number of hardware DMA channels.
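+
+A condensed sketch of how a driver's probe function might use this API
+(``my_dmadev_ops`` and ``vdev`` are hypothetical names used here only for
+illustration):
+
+.. code-block:: c
+
+   struct rte_dmadev *dev = rte_dmadev_pmd_allocate("my_dma_chan0");
+
+   if (dev == NULL)
+       return -EINVAL;
+   dev->dev_ops = &my_dmadev_ops; /* driver callback table (hypothetical) */
+   dev->device = &vdev->device;   /* backing generic device */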
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
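+
+A minimal usage sketch (device id 0 is assumed; error handling condensed):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = {
+       .nb_vchans = 1,        /* one virtual DMA channel */
+       .enable_silent = false,
+   };
+
+   if (rte_dmadev_configure(0, &dev_conf) != 0)
+       rte_exit(EXIT_FAILURE, "Cannot configure dmadev 0\n");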
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, and the source and
+destination device access port parameters.
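+
+For example, a memory-to-memory virtual DMA channel could be set up as in the
+sketch below (the descriptor count is illustrative and must lie within the
+``min_desc``/``max_desc`` range reported by ``rte_dmadev_info_get``):
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 1024,
+   };
+
+   if (rte_dmadev_vchan_setup(0, 0, &vchan_conf) != 0)
+       rte_exit(EXIT_FAILURE, "Cannot set up vchan 0\n");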
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability: when it is enabled, the
+application is not required to invoke the dequeue APIs.
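+
+As a sketch (error handling omitted), an application could query the device
+capabilities before relying on an optional operation:
+
+.. code-block:: c
+
+   struct rte_dmadev_info info;
+
+   rte_dmadev_info_get(0, &info);
+   if (!(info.dev_capa & RTE_DMADEV_CAPA_OPS_COPY_SG))
+       printf("dmadev 0 does not support scatter-gather copy\n");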
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue the doorbell to hardware.
+Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs, ``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``, which are used to obtain the results of
+enqueued requests. ``rte_dmadev_completed`` will return the number of
+successfully completed operations. ``rte_dmadev_completed_status`` will return
+the number of completed operations along with the status of each operation
+(filled into the ``status`` array passed by the user). These two APIs can also
+return the last completed operation's ``ring_idx``, which can help the user
+track operations within their own application-defined rings.
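+
+The sketch below shows one enqueue/submit/poll cycle (it assumes device 0 and
+vchan 0 are configured and started, and that ``src``, ``dst`` and ``length``
+describe a valid copy):
+
+.. code-block:: c
+
+   uint16_t last_idx;
+   bool has_error;
+   int ring_idx;
+
+   /* Enqueue a copy and ring the doorbell in one call. */
+   ring_idx = rte_dmadev_copy(0, 0, src, dst, length,
+                              RTE_DMA_OP_FLAG_SUBMIT);
+   if (ring_idx < 0)
+       rte_exit(EXIT_FAILURE, "Cannot enqueue copy\n");
+
+   /* Poll until the copy has completed. */
+   while (rte_dmadev_completed(0, 0, 1, &last_idx, &has_error) == 0)
+       ;
+   if (has_error)
+       printf("copy %d failed\n", ring_idx);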
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000000..157d7eb7dc
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507f46..0abea06b24 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v20 6/7] dma/skeleton: introduce skeleton dmadev driver
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 5/7] doc: add DMA device library guide Chengwen Feng
@ 2021-09-04 10:10   ` Chengwen Feng
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 7/7] app/test: add dmadev API test Chengwen Feng
  2021-09-06 13:37   ` [dpdk-dev] [PATCH v20 0/7] support dmadev Bruce Richardson
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-04 10:10 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The skeleton dmadev driver, along the lines of the rawdev skeleton, is for
showcasing the dmadev library.

The design of the skeleton involves a virtual device which is plugged into
the VDEV bus on initialization.

Also, enable compilation of the dmadev skeleton driver.
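
For example (a hypothetical but typical EAL invocation; the vdev name and
'lcore' argument come from the driver's registration below), the driver can
be instantiated at startup with a vdev argument such as:

    dpdk-testpmd --vdev=dma_skeleton,lcore=3

where the optional 'lcore' argument pins the driver's cpucopy thread to the
given core.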

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   1 +
 drivers/dma/meson.build                |  11 +
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 594 +++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  61 +++
 drivers/dma/skeleton/version.map       |   3 +
 drivers/meson.build                    |   1 +
 7 files changed, 678 insertions(+)
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index e237e9406b..2b505ce71e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -499,6 +499,7 @@ F: doc/guides/prog_guide/rawdev.rst
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: drivers/dma/skeleton/
 F: doc/guides/prog_guide/dmadev.rst
 
 
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000000..0c2c34cd00
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+if is_windows
+    subdir_done()
+endif
+
+drivers = [
+        'skeleton',
+]
+std_deps = ['dmadev']
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000000..27509b1668
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000000..0cc7e2409f
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,594 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances, currently only 1 is supported. */
+static uint16_t skeldma_count;
+
+static int
+skeldma_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	128
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM |
+			     RTE_DMADEV_CAPA_SVA |
+			     RTE_DMADEV_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dmadev *dev, const struct rte_dmadev_conf *conf,
+		  uint32_t conf_sz)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	RTE_SET_USED(conf_sz);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dmadev *dev = (struct rte_dmadev *)param;
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			/* Guard against overflow of the idle counter so the
+			 * thread keeps sleeping once the threshold is hit.
+			 */
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not set up, start failed!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, include:
+	 * 1) fflush pending/running/completed ring to empty ring.
+	 * 2) init ring idx to zero.
+	 * 3) init running statistics.
+	 * 4) mark cpucopy task exit_flag to false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Failed to start cpucopy thread!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Failed to set thread affinity to lcore %u!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skeleton_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Failed to allocate dma skeleton desc!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Failed to create dma skeleton desc ring!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a free ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dmadev *dev)
+{
+	/* The device is already stopped. */
+	vchan_release(dev->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dmadev *dev, uint16_t vchan,
+		    const struct rte_dmadev_vchan_conf *conf,
+		    uint32_t conf_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(conf_sz);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of desc must be a power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dmadev *dev, uint16_t vchan,
+		  struct rte_dmadev_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dmadev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->dev_private;
+
+	fprintf(f,
+		"  lcore_id: %d\n"
+		"  socket_id: %d\n"
+		"  desc_empty_ring_count: %u\n"
+		"  desc_pending_ring_count: %u\n"
+		"  desc_running_ring_count: %u\n"
+		"  desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	fprintf(f,
+		"  next_ring_idx: %u\n"
+		"  submitted_count: %" PRIu64 "\n"
+		"  completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(struct rte_dmadev *dev, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(flags);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(struct rte_dmadev *dev,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(struct rte_dmadev *dev,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dmadev_ops skeldma_ops = {
+	.dev_info_get  = skeldma_info_get,
+	.dev_configure = skeldma_configure,
+	.dev_start     = skeldma_start,
+	.dev_stop      = skeldma_stop,
+	.dev_close     = skeldma_close,
+
+	.vchan_setup   = skeldma_vchan_setup,
+
+	.stats_get     = skeldma_stats_get,
+	.stats_reset   = skeldma_stats_reset,
+
+	.dev_dump      = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dmadev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	dev = rte_dmadev_pmd_allocate(name);
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev->dev_private = rte_zmalloc_socket("dmadev private",
+					      sizeof(struct skeldma_hw),
+					      RTE_CACHE_LINE_SIZE,
+					      socket_id);
+	if (!dev->dev_private) {
+		SKELDMA_LOG(ERR, "Unable to allocate device private memory");
+		(void)rte_dmadev_pmd_release(dev);
+		return -ENOMEM;
+	}
+
+	dev->copy = skeldma_copy;
+	dev->submit = skeldma_submit;
+	dev->completed = skeldma_completed;
+	dev->completed_status = skeldma_completed_status;
+	dev->dev_ops = &skeldma_ops;
+	dev->device = &vdev->device;
+
+	hw = dev->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	dev = rte_dmadev_get_device_by_name(name);
+	if (!dev)
+		return -EINVAL;
+
+	ret = rte_dmadev_close(dev->data->dev_id);
+	if (ret)
+		return ret;
+
+	rte_free(dev->dev_private);
+	dev->dev_private = NULL;
+	(void)rte_dmadev_pmd_release(dev);
+
+	return 0;
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parsed lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multiple processes not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_count > 0) {
+		SKELDMA_LOG(ERR, "Multiple instances not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		skeldma_count = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_count = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000000..1cdfdde153
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef __SKELETON_DMADEV_H__
+#define __SKELETON_DMADEV_H__
+
+#include <pthread.h>
+
+#include <rte_ring.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* __SKELETON_DMADEV_H__ */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000000..c2e0723b4c
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
diff --git a/drivers/meson.build b/drivers/meson.build
index d9e331ec85..a390787d6a 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v20 7/7] app/test: add dmadev API test
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-09-04 10:10   ` Chengwen Feng
  2021-09-06 13:37   ` [dpdk-dev] [PATCH v20 0/7] support dmadev Bruce Richardson
  7 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-04 10:10 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the
test framework.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                |   1 +
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  43 +++
 app/test/test_dmadev_api.c | 543 +++++++++++++++++++++++++++++++++++++
 4 files changed, 591 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 2b505ce71e..a19a3cb53c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -500,6 +500,7 @@ DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
 F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
 F: doc/guides/prog_guide/dmadev.rst
 
 
diff --git a/app/test/meson.build b/app/test/meson.build
index a7611686ad..9027eba3a4 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -43,6 +43,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -162,6 +164,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -333,6 +336,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000000..92c47fc041
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+
+/* from test_dmadev_api.c */
+extern int test_dmadev_api(uint16_t dev_id);
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dmadev_get_dev_id(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dmadev_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dmadev(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dmadev);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000000..55046ac485
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,543 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dmadev_api(uint16_t dev_id);
+
+#define SKELDMA_TEST_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static uint16_t test_dev_id;
+static uint16_t invalid_dev_id;
+
+static char *src;
+static char *dst;
+
+static int total;
+static int passed;
+static int failed;
+
+static int
+testsuite_setup(uint16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL) {
+		rte_free(src);
+		src = NULL;
+		return -ENOMEM;
+	}
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	/* Set dmadev log level to critical to suppress unnecessary output
+	 * during API tests.
+	 */
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	src = NULL;
+	rte_free(dst);
+	dst = NULL;
+	/* Ensure the dmadev is stopped. */
+	rte_dmadev_stop(test_dev_id);
+
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dmadev_get_dev_id(void)
+{
+	int ret = rte_dmadev_get_dev_id("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dmadev_is_valid_dev(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dmadev_is_valid_dev(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_count(void)
+{
+	uint16_t count = rte_dmadev_count();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_info_get(void)
+{
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_configure(void)
+{
+	struct rte_dmadev_conf conf = { 0 };
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configure nb_vchans not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_vchan_setup(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_vchan_setup(test_dev_id, dev_conf.nb_vchans,
+				     &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.src_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_start_stop(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup when device started */
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Expected -EBUSY, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Expected -EBUSY, %d", ret);
+
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_stats(void)
+{
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dmadev_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, RTE_DMADEV_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret, i;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check enqueue without submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Expected no completed request");
+
+	/* Check the separate submit API */
+	ret = rte_dmadev_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Unexpected completion status, %d", status[i]);
+
+	/* Check that querying completed status again returns nothing */
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Expected no completed status");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Unexpected completion status, %d", status[i]);
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dmadev_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup fail!\n");
+		return -1;
+	}
+
+	/* If a testcase exits successfully, it must ensure that the test
+	 * dmadev still exists and is left in the stopped state.
+	 */
+	SKELDMA_TEST_RUN(test_dmadev_get_dev_id);
+	SKELDMA_TEST_RUN(test_dmadev_is_valid_dev);
+	SKELDMA_TEST_RUN(test_dmadev_count);
+	SKELDMA_TEST_RUN(test_dmadev_info_get);
+	SKELDMA_TEST_RUN(test_dmadev_configure);
+	SKELDMA_TEST_RUN(test_dmadev_vchan_setup);
+	SKELDMA_TEST_RUN(test_dmadev_start_stop);
+	SKELDMA_TEST_RUN(test_dmadev_stats);
+	SKELDMA_TEST_RUN(test_dmadev_dump);
+	SKELDMA_TEST_RUN(test_dmadev_completed);
+	SKELDMA_TEST_RUN(test_dmadev_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-03 13:03     ` Bruce Richardson
  2021-09-04  3:05       ` fengchengwen
@ 2021-09-04 10:10       ` Morten Brørup
  1 sibling, 0 replies; 339+ messages in thread
From: Morten Brørup @ 2021-09-04 10:10 UTC (permalink / raw)
  To: Bruce Richardson, Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Bruce Richardson
> Sent: Friday, 3 September 2021 15.04
> 
> On Thu, Sep 02, 2021 at 09:13:09PM +0800, Chengwen Feng wrote:
> > The 'dmadevice' is a generic type of DMA device.
> >
> > This patch introduces the 'dmadevice' public APIs which expose generic
> > operations that can enable configuration and I/O with the DMA devices.
> >
> > Maintainers update is also included in this patch.
> >
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> > Acked-by: Morten Brørup <mb@smartsharesystems.com>
> > Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
> > ---
> >  MAINTAINERS                            |   4 +
> >  doc/api/doxy-api-index.md              |   1 +
> >  doc/api/doxy-api.conf.in               |   1 +
> >  doc/guides/rel_notes/release_21_11.rst |   5 +
> >  lib/dmadev/meson.build                 |   4 +
> >  lib/dmadev/rte_dmadev.h                | 949
> +++++++++++++++++++++++++++++++++
> >  lib/dmadev/version.map                 |  24 +
> >  lib/meson.build                        |   1 +
> >  8 files changed, 989 insertions(+)
> >  create mode 100644 lib/dmadev/meson.build
> >  create mode 100644 lib/dmadev/rte_dmadev.h
> >  create mode 100644 lib/dmadev/version.map
> >
> 
> <snip>
> 
> > +/**
> > + * @warning
> > + * @b EXPERIMENTAL: this API may change without prior notice.
> > + *
> > + * Trigger hardware to begin performing enqueued operations.
> > + *
> > + * This API is used to write the "doorbell" to the hardware to
> trigger it
> > + * to begin the operations previously enqueued by
> rte_dmadev_copy/fill().
> > + *
> > + * @param dev_id
> > + *   The identifier of the device.
> > + * @param vchan
> > + *   The identifier of virtual DMA channel.
> > + *
> > + * @return
> > + *   0 on success. Otherwise negative value is returned.
> > + */
> > +__rte_experimental
> > +int
> > +rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
> > +
> 
> Putting this out here for discussion:
> 
> Those developers here looking at how to integrate dma acceleration into
> vhost-virtio, e.g. for OVS use, have come back with the request that we
> provide a method for querying the amount of space in the descriptor
> ring, or the size of the next burst, or similar. Basically, the reason
> for the ask is to allow an app to determine if a set of jobs of size N
> can be enqueued before the first one is, so that we don't get a
> half-offloaded copy of a multi-segment packet (for devices where
> scatter-gather is not available).
> 
> In our "ioat" rawdev driver, we did this by providing a
> "burst_capacity" API which returned the number of elements which could
> be enqueued in the next burst without error (normally the available
> ring space). Looking at the dmadev APIs, an alternative way to do this
> is to extend the "submit()" function to allow a 3rd optional parameter
> to return this info. That is, when submitting one burst of operations,
> you get info about how many more you can enqueue in the next burst.
> [For submitting packets via the submit flag, this info would not be
> available, as I feel extending all enqueue operations would be
> excessive.]
> 
> Therefore, I see a number of options for us to meet the ask for a
> space-querying API:
> 1. provide a capacity API as done with the ioat driver
> 2. provide (optional) capacity information from each submit() call
> 3. provide both #1 and #2 above, as they are compatible
> 4. <some other idea>
> 
> For me, I think #3 is probably the most flexible approach. The benefit
> of #2 is that the info can be provided to the application much more
> cheaply than when the app has to call a separate API (which wouldn't be
> on the fast-path). However, a way to provide the info apart from
> submitting a burst would also be helpful, hence adding the extra
> function too (#1).
> 
> What are other people's thoughts or ideas on this?
> 

#2 is low-cost. However, the information about the remaining capacity quickly becomes outdated if not used immediately, so we also need #1.

And #1 can also be used from the slow path, e.g. for telemetry purposes.

So I vote for providing #1 and optionally #2.

I also considered whether a _bulk function would be useful in addition to the _burst function. But I think that the fast path application's decision is not binary (i.e. use DMA or not); the fast path application would want to process as many operations as possible by DMA and then process the remainder in software.
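
To make the discussion concrete, here is a rough sketch of how an application could use #1 before enqueuing a multi-segment copy. Note that rte_dmadev_burst_capacity() does not exist in this patch set - it is only a placeholder name for the proposed #1 API, and error handling is trimmed:

  #include <errno.h>
  #include <rte_dmadev.h>

  static int
  enqueue_pkt_copy(uint16_t dev_id, uint16_t vchan,
                   const rte_iova_t *src, const rte_iova_t *dst,
                   const uint32_t *len, uint16_t nb_segs)
  {
          uint16_t i;

          /* #1: refuse rather than half-offload the packet */
          if (rte_dmadev_burst_capacity(dev_id, vchan) < nb_segs)
                  return -ENOSPC; /* caller falls back to a SW copy */

          for (i = 0; i < nb_segs; i++)
                  rte_dmadev_copy(dev_id, vchan, src[i], dst[i],
                                  len[i], 0);

          /* #2 would let this call also report the remaining capacity */
          return rte_dmadev_submit(dev_id, vchan);
  }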

-Morten


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v20 5/7] doc: add DMA device library guide
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 5/7] doc: add DMA device library guide Chengwen Feng
@ 2021-09-04 10:17     ` Jerin Jacob
  0 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-09-04 10:17 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, Ferruh Yigit, Richardson, Bruce, Jerin Jacob,
	Andrew Rybchenko, dpdk-dev, Morten Brørup, Nipun Gupta,
	Hemant Agrawal, Maxime Coquelin, Honnappa Nagarahalli,
	David Marchand, Satananda Burla, Prasun Kapoor, Ananyev,
	Konstantin, Walsh, Conor, Laatz, Kevin

On Sat, Sep 4, 2021 at 3:44 PM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> This patch adds dmadev library guide.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Conor Walsh <conor.walsh@intel.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>

Acked-by: Jerin Jacob <jerinj@marvell.com>


> ---
>  MAINTAINERS                          |   1 +
>  doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++
>  doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++
>  doc/guides/prog_guide/index.rst      |   1 +
>  4 files changed, 410 insertions(+)
>  create mode 100644 doc/guides/prog_guide/dmadev.rst
>  create mode 100644 doc/guides/prog_guide/img/dmadev.svg
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 9885cc56b7..e237e9406b 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -499,6 +499,7 @@ F: doc/guides/prog_guide/rawdev.rst
>  DMA device API - EXPERIMENTAL
>  M: Chengwen Feng <fengchengwen@huawei.com>
>  F: lib/dmadev/
> +F: doc/guides/prog_guide/dmadev.rst
>
>
>  Memory Pool Drivers
> diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
> new file mode 100644
> index 0000000000..e47a164850
> --- /dev/null
> +++ b/doc/guides/prog_guide/dmadev.rst
> @@ -0,0 +1,125 @@
> +.. SPDX-License-Identifier: BSD-3-Clause
> +   Copyright 2021 HiSilicon Limited
> +
> +DMA Device Library
> +====================
> +
> +The DMA library provides a DMA device framework for management and provisioning
> +of hardware and software DMA poll mode drivers, defining generic APIs which
> +support a number of different DMA operations.
> +
> +
> +Design Principles
> +-----------------
> +
> +The DMA library follows the same basic principles as those used in DPDK's
> +Ethernet Device framework and the RegEx framework. The DMA framework provides
> +a generic DMA device framework which supports both physical (hardware)
> +and virtual (software) DMA devices, as well as a generic DMA API which allows
> +DMA devices to be managed and configured, and which supports provisioning
> +DMA operations on a DMA poll mode driver.
> +
> +.. _figure_dmadev:
> +
> +.. figure:: img/dmadev.*
> +
> +The above figure shows the model on which the DMA framework is built:
> +
> + * The DMA controller could have multiple hardware DMA channels (aka. hardware
> +   DMA queues); each hardware DMA channel should be represented by a dmadev.
> + * The dmadev could create multiple virtual DMA channels, where each virtual
> +   DMA channel represents a different transfer context. A DMA operation
> +   request must be submitted to a virtual DMA channel. For example, an
> +   application could create virtual DMA channel 0 for the memory-to-memory
> +   transfer scenario and virtual DMA channel 1 for the memory-to-device
> +   transfer scenario.
> +
> +
> +Device Management
> +-----------------
> +
> +Device Creation
> +~~~~~~~~~~~~~~~
> +
> +Physical DMA controllers are discovered during the PCI probe/enumeration
> +performed by the EAL at DPDK initialization; this discovery is based on their
> +PCI BDF (bus/bridge, device, function). Specific physical DMA controllers,
> +like other physical devices in DPDK, can be listed using the EAL command line
> +options.
> +
> +The dmadevs are dynamically allocated by using the API
> +``rte_dmadev_pmd_allocate`` based on the number of hardware DMA channels.
> +
> +
> +Device Identification
> +~~~~~~~~~~~~~~~~~~~~~
> +
> +Each DMA device, whether physical or virtual, is uniquely designated by two
> +identifiers:
> +
> +- A unique device index used to designate the DMA device in all functions
> +  exported by the DMA API.
> +
> +- A device name used to designate the DMA device in console messages, for
> +  administration or debugging purposes.
> +
> +
> +Device Configuration
> +~~~~~~~~~~~~~~~~~~~~
> +
> +The ``rte_dmadev_configure`` API is used to configure a DMA device.
> +
> +.. code-block:: c
> +
> +   int rte_dmadev_configure(uint16_t dev_id,
> +                            const struct rte_dmadev_conf *dev_conf);
> +
> +The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
> +for the DMA device, for example the number of virtual DMA channels to set up
> +and an indication of whether to enable silent mode.
> +
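> +A minimal usage sketch (the single virtual DMA channel requested here is
> +purely illustrative):
> +
> +.. code-block:: c
> +
> +   struct rte_dmadev_conf conf = {
> +       .nb_vchans = 1,
> +   };
> +
> +   if (rte_dmadev_configure(dev_id, &conf) < 0)
> +       rte_exit(EXIT_FAILURE, "cannot configure dmadev %u\n", dev_id);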
> +
> +Configuration of Virtual DMA Channels
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
> +
> +.. code-block:: c
> +
> +   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
> +                              const struct rte_dmadev_vchan_conf *conf);
> +
> +The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
> +parameters for the virtual DMA channel, for example the transfer direction,
> +the number of descriptors for the virtual DMA channel, and the source and
> +destination device access port parameters.
> +
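> +For example, a sketch that sets up virtual DMA channel 0 for memory-to-memory
> +copies (the descriptor count is illustrative and must lie within the
> +``min_desc``/``max_desc`` range reported by ``rte_dmadev_info_get``):
> +
> +.. code-block:: c
> +
> +   struct rte_dmadev_vchan_conf vconf = {
> +       .direction = RTE_DMA_DIR_MEM_TO_MEM,
> +       .nb_desc = 1024,
> +   };
> +
> +   if (rte_dmadev_vchan_setup(dev_id, 0, &vconf) < 0)
> +       rte_exit(EXIT_FAILURE, "cannot set up vchan 0\n");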
> +
> +Device Features and Capabilities
> +--------------------------------
> +
> +DMA devices may support different feature sets. The ``rte_dmadev_info_get`` API
> +can be used to get the device info and supported features.
> +
> +Silent mode is a special device capability under which the application is
> +not required to invoke the dequeue APIs to retire completed operations.
> +
> +
> +Enqueue / Dequeue APIs
> +~~~~~~~~~~~~~~~~~~~~~~
> +
> +Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used to
> +enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
> +returned. This ``ring_idx`` can be used by applications to track per operation
> +metadata in an application-defined circular ring.
> +
> +The ``rte_dmadev_submit`` API is used to issue doorbell to hardware.
> +Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
> +APIs to also issue the doorbell to hardware.
> +
> +There are two dequeue APIs, ``rte_dmadev_completed`` and
> +``rte_dmadev_completed_status``; these are used to obtain the results of
> +the enqueue requests. ``rte_dmadev_completed`` will return the number of
> +successfully completed operations. ``rte_dmadev_completed_status`` will return
> +the number of completed operations along with the status of each operation
> +(filled into the ``status`` array passed by the user). These two APIs can also
> +return the last completed operation's ``ring_idx``, which can help users track
> +operations within their own application-defined rings.
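> +
> +Putting these calls together, a minimal copy-and-poll sketch (vchan 0, the
> +IOVA variables and the ``handle_*`` helpers are placeholders):
> +
> +.. code-block:: c
> +
> +   uint16_t last_idx;
> +   bool has_error;
> +
> +   if (rte_dmadev_copy(dev_id, 0, src_iova, dst_iova, length,
> +                       RTE_DMA_OP_FLAG_SUBMIT) < 0)
> +       handle_enqueue_failure();
> +
> +   /* busy-poll until the single copy operation completes */
> +   while (rte_dmadev_completed(dev_id, 0, 1, &last_idx, &has_error) == 0)
> +       ;
> +
> +   if (has_error)
> +       handle_dma_error(last_idx);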
> diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
> new file mode 100644
> index 0000000000..157d7eb7dc
> --- /dev/null
> +++ b/doc/guides/prog_guide/img/dmadev.svg
> @@ -0,0 +1,283 @@
> +<?xml version="1.0" encoding="UTF-8" standalone="no"?>
> +<!-- Created with Inkscape (http://www.inkscape.org/) -->
> +
> +<!-- SPDX-License-Identifier: BSD-3-Clause -->
> +<!-- Copyright(c) 2021 HiSilicon Limited -->
> +
> +<svg
> +   width="128.64288mm"
> +   height="95.477707mm"
> +   viewBox="0 0 192.96433 143.21656"
> +   version="1.1"
> +   id="svg934"
> +   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
> +   sodipodi:docname="dmadev.svg"
> +   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
> +   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
> +   xmlns="http://www.w3.org/2000/svg"
> +   xmlns:svg="http://www.w3.org/2000/svg">
> +  <sodipodi:namedview
> +     id="namedview936"
> +     pagecolor="#ffffff"
> +     bordercolor="#666666"
> +     borderopacity="1.0"
> +     inkscape:pageshadow="2"
> +     inkscape:pageopacity="0.0"
> +     inkscape:pagecheckerboard="0"
> +     inkscape:document-units="mm"
> +     showgrid="false"
> +     fit-margin-top="0"
> +     fit-margin-left="0"
> +     fit-margin-right="0"
> +     fit-margin-bottom="0"
> +     inkscape:showpageshadow="false"
> +     inkscape:zoom="1.332716"
> +     inkscape:cx="335.03011"
> +     inkscape:cy="143.69152"
> +     inkscape:window-width="1920"
> +     inkscape:window-height="976"
> +     inkscape:window-x="-8"
> +     inkscape:window-y="-8"
> +     inkscape:window-maximized="1"
> +     inkscape:current-layer="layer1"
> +     scale-x="1.5"
> +     units="mm" />
> +  <defs
> +     id="defs931">
> +    <rect
> +       x="342.43954"
> +       y="106.56832"
> +       width="58.257381"
> +       height="137.82834"
> +       id="rect17873" />
> +  </defs>
> +  <g
> +     inkscape:label="Layer 1"
> +     inkscape:groupmode="layer"
> +     id="layer1"
> +     transform="translate(-0.13857517,-21.527306)">
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
> +       id="rect31-9"
> +       width="50"
> +       height="28"
> +       x="0.13857517"
> +       y="21.527306"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1"
> +       transform="translate(-49.110795,15.205683)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1045">virtual DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1047">channel</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
> +       id="rect31-9-5"
> +       width="50"
> +       height="28"
> +       x="60.138577"
> +       y="21.527306"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4"
> +       transform="translate(10.512565,15.373298)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1049">virtual DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1051">channel</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
> +       id="rect31-9-5-3"
> +       width="50"
> +       height="28"
> +       x="137.43863"
> +       y="21.527306"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-8"
> +       transform="translate(88.79231,15.373299)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1053">virtual DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1055">channel</tspan></text>
> +    <text
> +       xml:space="preserve"
> +       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
> +       id="text17871"
> +       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
> +       id="rect31-9-5-8"
> +       width="38.34557"
> +       height="19.729115"
> +       x="36.138577"
> +       y="64.827354"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3"
> +       transform="translate(-13.394978,59.135217)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1057">dmadev</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
> +       id="rect31-9-5-8-0"
> +       width="60.902534"
> +       height="24.616455"
> +       x="25.196909"
> +       y="98.47744"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3-76"
> +       transform="translate(-24.485484,90.97883)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1059">hardware DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1061">channel</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
> +       id="rect31-9-5-8-0-6"
> +       width="60.902534"
> +       height="24.616455"
> +       x="132.20036"
> +       y="98.47744"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3-76-7"
> +       transform="translate(82.950904,90.79085)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1063">hardware DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1065">channel</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
> +       id="rect31-9-5-8-0-4"
> +       width="60.902534"
> +       height="24.616455"
> +       x="76.810928"
> +       y="140.12741"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3-76-4"
> +       transform="translate(27.032341,133.10574)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1067">hardware DMA </tspan><tspan
> +         x="54.136707"
> +         y="26.865018"
> +         id="tspan1069">controller</tspan></text>
> +    <rect
> +       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
> +       id="rect31-9-5-8-5"
> +       width="38.34557"
> +       height="19.729115"
> +       x="143.43863"
> +       y="64.827354"
> +       ry="0" />
> +    <text
> +       xml:space="preserve"
> +       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
> +       x="54.136707"
> +       y="18.045568"
> +       id="text803-1-4-3-7"
> +       transform="translate(94.92597,59.664385)"><tspan
> +         x="54.136707"
> +         y="18.045568"
> +         id="tspan1071">dmadev</tspan></text>
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="M 74.476373,49.527306 62.82407,64.827354"
> +       id="path45308"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5"
> +       inkscape:connection-end="#rect31-9-5-8" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="M 35.924309,49.527306 47.711612,64.827354"
> +       id="path45310"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9"
> +       inkscape:connection-end="#rect31-9-5-8" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="M 55.403414,84.556469 55.53332,98.47744"
> +       id="path45312"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-8"
> +       inkscape:connection-end="#rect31-9-5-8-0" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 162.62241,84.556469 0.0155,13.920971"
> +       id="path45320"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-8-5"
> +       inkscape:connection-end="#rect31-9-5-8-0-6" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 146.28317,123.09389 -22.65252,17.03352"
> +       id="path45586"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-8-0-6"
> +       inkscape:connection-end="#rect31-9-5-8-0-4" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 70.900938,123.09389 21.108496,17.03352"
> +       id="path45588"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-8-0"
> +       inkscape:connection-end="#rect31-9-5-8-0-4" />
> +    <path
> +       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
> +       d="m 162.50039,49.527306 0.0675,15.300048"
> +       id="path45956"
> +       inkscape:connector-type="polyline"
> +       inkscape:connector-curvature="0"
> +       inkscape:connection-start="#rect31-9-5-3"
> +       inkscape:connection-end="#rect31-9-5-8-5" />
> +  </g>
> +</svg>
> diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
> index 2dce507f46..0abea06b24 100644
> --- a/doc/guides/prog_guide/index.rst
> +++ b/doc/guides/prog_guide/index.rst
> @@ -29,6 +29,7 @@ Programmer's Guide
>      regexdev
>      rte_security
>      rawdev
> +    dmadev
>      link_bonding_poll_mode_drv_lib
>      timer_lib
>      hash_lib
> --
> 2.33.0
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-04  7:02           ` fengchengwen
@ 2021-09-06  1:46             ` Li, Xiaoyun
  2021-09-06  8:00               ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Li, Xiaoyun @ 2021-09-06  1:46 UTC (permalink / raw)
  To: fengchengwen, Maxime Coquelin, Richardson, Bruce
  Cc: thomas, Yigit, Ferruh, jerinj, jerinjacobk, andrew.rybchenko,
	dev, mb, nipun.gupta, hemant.agrawal, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, Ananyev, Konstantin, Walsh,
	Conor, Xia, Chenbo

Hi

> -----Original Message-----
> From: fengchengwen <fengchengwen@huawei.com>
> Sent: Saturday, September 4, 2021 15:02
> To: Maxime Coquelin <maxime.coquelin@redhat.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>
> Cc: thomas@monjalon.net; Yigit, Ferruh <ferruh.yigit@intel.com>;
> jerinj@marvell.com; jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru;
> dev@dpdk.org; mb@smartsharesystems.com; nipun.gupta@nxp.com;
> hemant.agrawal@nxp.com; honnappa.nagarahalli@arm.com;
> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com;
> Ananyev, Konstantin <konstantin.ananyev@intel.com>; Walsh, Conor
> <conor.walsh@intel.com>; Xia, Chenbo <chenbo.xia@intel.com>
> Subject: Re: [PATCH v18 8/8] maintainers: add for dmadev
> 
> On 2021/9/3 20:59, Maxime Coquelin wrote:
> > Hi,
> >
> > On 9/2/21 3:39 PM, fengchengwen wrote:
> >> Fix in v19
> >>
> >> I think there are many patches waiting on the dmadev framework going
> >> upstream, so could you help review the unreviewed patches (like
> >> dma/skeleton and app/test)?
> >
> > Thanks for all the work, it looks really promising!
> >
> >> Also, we have developed the dma driver for hisilicon, and the
> >> corresponding test apps (like examples/vhost and testpmd) are being
> >> developed.
> >>
> >> examples/vhost: will extend support dmadev type.
> >
> > I think Sunil has posted RFC for Vhost lib and example, you might want
> > to have a look to avoid duplicate work.
> 
> Got it, thanks
> 
> >
> >> testpmd: will introduce some dma control commands and the dma
> >> forwarding mode.
> >>          the dma forwarding mode process:
> >>               // 1st: call rte_eth_rx_burst
> >>               // 2nd: post the received packet to dma, moving its data to another packet
> >>               // 3rd: set the newly copied rte_mbuf's header
> >>               // 4th: free the received packet
> >>               // 5th: get the completed dma request and associate it with its rte_mbuf
> >>               // 6th: send the 5th step's rte_mbuf to the nic
> >>
> >> @Maxime @Chenbo @Xiaoyun Like to hear your opinion.

What're some dma control commands? Can you give some examples here?

And the fwding mode in testpmd sounds like a functional test to verify the dmadev APIs.
Why don't you just test in app/test like test_eventdev or test_cryptodev? What's the purpose of this dma forwarding mode if it's just a dma-copy from one mbuf to another?

> >
> > We might also think of adding async support to Vhost PMD, that would
> > be another way to test dmadev with testpmd.

+1 on this. This makes more sense.

> 
> OK
> 
> >
> > Thanks,
> > Maxime
> >
> >>
> >> On 2021/9/2 19:51, Bruce Richardson wrote:
> >>> On Thu, Sep 02, 2021 at 06:54:17PM +0800, Chengwen Feng wrote:
> >>>> This patch adds myself as dmadev's maintainer and updates the release notes.
> >>>>
> >>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com> ---
> >>>
> >>> Just in case you are doing any further revisions of this patchset,
> >>> the maintainers entry, and RN entry, generally is added in the first
> >>> patch, so squash this patch into patch #1.
> >>> .
> >>>
> >>
> >
> > .
> >

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-03 12:59         ` Maxime Coquelin
  2021-09-04  7:02           ` fengchengwen
@ 2021-09-06  2:03           ` Xia, Chenbo
  2021-09-06  8:01             ` fengchengwen
  1 sibling, 1 reply; 339+ messages in thread
From: Xia, Chenbo @ 2021-09-06  2:03 UTC (permalink / raw)
  To: Maxime Coquelin, fengchengwen, Richardson, Bruce, Li, Xiaoyun
  Cc: thomas, Yigit, Ferruh, jerinj, jerinjacobk, andrew.rybchenko,
	dev, mb, nipun.gupta, hemant.agrawal, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, Ananyev, Konstantin, Walsh,
	Conor

Hi,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Friday, September 3, 2021 8:59 PM
> To: fengchengwen <fengchengwen@huawei.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>
> Cc: thomas@monjalon.net; Yigit, Ferruh <ferruh.yigit@intel.com>;
> jerinj@marvell.com; jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru;
> dev@dpdk.org; mb@smartsharesystems.com; nipun.gupta@nxp.com;
> hemant.agrawal@nxp.com; honnappa.nagarahalli@arm.com;
> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com; Ananyev,
> Konstantin <konstantin.ananyev@intel.com>; Walsh, Conor
> <conor.walsh@intel.com>; Xia, Chenbo <chenbo.xia@intel.com>
> Subject: Re: [PATCH v18 8/8] maintainers: add for dmadev
> 
> Hi,
> 
> On 9/2/21 3:39 PM, fengchengwen wrote:
> > Fix in v19
> >
> > I think there are many patches waiting on the dmadev framework going
> > upstream, so could you help review the unreviewed patches (like
> > dma/skeleton and app/test)?
> 
> Thanks for all the work, it looks really promising!
> 
> > Also, we have developed the dma driver for hisilicon, and the corresponding
> > test apps (like examples/vhost and testpmd) are being developed.
> >
> > examples/vhost: will extend support dmadev type.
> 
> I think Sunil has posted RFC for Vhost lib and example, you might want
> to have a look to avoid duplicate work.
> 
> > testpmd: will introduce some dma control commands and the dma forwarding
> > mode.
> >          the dma forwarding mode process:
> >               // 1st: call rte_eth_rx_burst
> >               // 2nd: post the received packet to dma, moving its data to
> >                      another packet
> >               // 3rd: set the newly copied rte_mbuf's header
> >               // 4th: free the received packet
> >               // 5th: get the completed dma request and associate it with
> >                      its rte_mbuf
> >               // 6th: send the 5th step's rte_mbuf to the nic
> >
> > @Maxime @Chenbo @Xiaoyun Like to hear your opinion.
> 
> We might also think of adding async support to Vhost PMD, that would be
> another way to test dmadev with testpmd.

+1 for this. I would prefer support in vhost pmd so that it could be tested with
testpmd.

About the testpmd patches: if it's for testing the dma device itself, we usually
create a new app (currently testpmd only tests ethdev). If the vhost pmd also
needs some user input, just make it a devarg. Does that make sense to you?

Thanks,
Chenbo

> 
> Thanks,
> Maxime
> 
> >
> > On 2021/9/2 19:51, Bruce Richardson wrote:
> >> On Thu, Sep 02, 2021 at 06:54:17PM +0800, Chengwen Feng wrote:
> >>> This patch adds myself as dmadev's maintainer and updates the release notes.
> >>>
> >>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com> ---
> >>
> >> Just in case you are doing any further revisions of this patchset, the
> >> maintainers entry, and RN entry, generally is added in the first patch, so
> >> squash this patch into patch #1.
> >> .
> >>
> >


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-04  1:31       ` fengchengwen
@ 2021-09-06  6:48         ` Gagandeep Singh
  2021-09-06  7:52           ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Gagandeep Singh @ 2021-09-06  6:48 UTC (permalink / raw)
  To: fengchengwen, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, Nipun Gupta, Hemant Agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh



> -----Original Message-----
> From: fengchengwen <fengchengwen@huawei.com>
> Sent: Saturday, September 4, 2021 7:02 AM
> To: Gagandeep Singh <G.Singh@nxp.com>; thomas@monjalon.net;
> ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
> jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
> Cc: dev@dpdk.org; mb@smartsharesystems.com; Nipun Gupta
> <nipun.gupta@nxp.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> maxime.coquelin@redhat.com; honnappa.nagarahalli@arm.com;
> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com;
> konstantin.ananyev@intel.com; conor.walsh@intel.com
> Subject: Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library
> public APIs
> 
> On 2021/9/3 19:42, Gagandeep Singh wrote:
> > Hi,
> >
> > <snip>
> >> +
> >> +/**
> >> + * @warning
> >> + * @b EXPERIMENTAL: this API may change without prior notice.
> >> + *
> >> + * Close a DMA device.
> >> + *
> >> + * The device cannot be restarted after this call.
> >> + *
> >> + * @param dev_id
> >> + *   The identifier of the device.
> >> + *
> >> + * @return
> >> + *   0 on success. Otherwise negative value is returned.
> >> + */
> >> +__rte_experimental
> >> +int
> >> +rte_dmadev_close(uint16_t dev_id);
> >> +
> >> +/**
> >> + * rte_dma_direction - DMA transfer direction defines.
> >> + */
> >> +enum rte_dma_direction {
> >> +	RTE_DMA_DIR_MEM_TO_MEM,
> >> +	/**< DMA transfer direction - from memory to memory.
> >> +	 *
> >> +	 * @see struct rte_dmadev_vchan_conf::direction
> >> +	 */
> >> +	RTE_DMA_DIR_MEM_TO_DEV,
> >> +	/**< DMA transfer direction - from memory to device.
> >> +	 * In a typical scenario, the SoCs are installed on host servers as
> >> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> >> +	 * EP(endpoint) mode, it could initiate a DMA move request from
> >> memory
> >> +	 * (which is SoCs memory) to device (which is host memory).
> >> +	 *
> >> +	 * @see struct rte_dmadev_vchan_conf::direction
> >> +	 */
> >> +	RTE_DMA_DIR_DEV_TO_MEM,
> >> +	/**< DMA transfer direction - from device to memory.
> >> +	 * In a typical scenario, the SoCs are installed on host servers as
> >> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> >> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
> >> +	 * (which is host memory) to memory (which is SoCs memory).
> >> +	 *
> >> +	 * @see struct rte_dmadev_vchan_conf::direction
> >> +	 */
> >> +	RTE_DMA_DIR_DEV_TO_DEV,
> >> +	/**< DMA transfer direction - from device to device.
> >> +	 * In a typical scenario, the SoCs are installed on host servers as
> >> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> >> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
> >> +	 * (which is host memory) to the device (which is another host memory).
> >> +	 *
> >> +	 * @see struct rte_dmadev_vchan_conf::direction
> >> +	 */
> >> +};
> >> +
> >> +/**
> >> ..
> > The enum rte_dma_direction must have a member RTE_DMA_DIR_ANY for a
> channel that supports all 4 directions.
> 
> We've discussed this issue before. The earliest solution was to set up
> channels to support multiple DIRs, but no hardware/driver actually used this
> (at least at that time); they (like octeontx2_dma/dpaa) all set up one
> logical channel to serve a single transfer direction.
> 
> So, do you have that kind of need for your driver?
> 
Both DPAA1 and DPAA2 drivers can support ANY direction on a channel, so we would like to have this option as well.

> 
> If you have a strong need, we'll consider the following options:
> 
> Once the channel is set up, there are no other parameters to indicate the
> copy request's transfer direction, so I think it is not enough to define
> RTE_DMA_DIR_ANY only.
> 
> Maybe we could add RTE_DMA_OP_xxx macros
> (RTE_DMA_OP_FLAG_M2M/M2D/D2M/D2D); these macros would be passed as the flags
> parameter to the enqueue API, so the enqueue API knows which transfer
> direction the request corresponds to.
> 
> We can easily extend the existing framework as follows:
> a. define capability RTE_DMADEV_CAPA_DIR_ANY, which devices that support it
> could declare.
> b. define direction macro: RTE_DMA_DIR_ANY
> c. define dma_op: RTE_DMA_OP_FLAG_DIR_M2M/M2D/D2M/D2D, which will be passed
> as the flags parameter.
> 
> A driver which doesn't support this feature simply doesn't declare it; the
> framework ensures that RTE_DMA_DIR_ANY is not passed down, and the driver
> can ignore the RTE_DMA_OP_FLAG_DIR_xxx flags in the enqueue API.
> 
> A driver which supports this feature lets the application create a channel
> with RTE_DMA_DIR_ANY or RTE_DMA_DIR_MEM_TO_MEM.
> If created with RTE_DMA_DIR_ANY, the RTE_DMA_OP_FLAG_DIR_xxx flags must be
> interpreted by the driver.
> If created with RTE_DMA_DIR_MEM_TO_MEM, the RTE_DMA_OP_FLAG_DIR_xxx flags
> can be ignored.
> 
Your design looks ok to me.
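For illustration, the proposed extension could be used along these lines. This is a hypothetical sketch only: RTE_DMADEV_CAPA_DIR_ANY, RTE_DMA_DIR_ANY and the RTE_DMA_OP_FLAG_DIR_xxx flags are just the proposal above, and the signatures are approximate.

	/* a single virtual channel handling all directions, valid only if the
	 * device declares the proposed RTE_DMADEV_CAPA_DIR_ANY capability
	 */
	struct rte_dmadev_vchan_conf conf = {
		.direction = RTE_DMA_DIR_ANY,        /* proposed value */
	};
	rte_dmadev_vchan_setup(dev_id, vchan, &conf);

	/* each request then carries its direction in the flags parameter */
	rte_dmadev_copy(dev_id, vchan, src, dst, len, RTE_DMA_OP_FLAG_DIR_M2D);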

> 
> > <snip>
> >
> >
> > Regards,
> > Gagan
> >

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-06  6:48         ` Gagandeep Singh
@ 2021-09-06  7:52           ` fengchengwen
  2021-09-06  8:06             ` Jerin Jacob
                               ` (2 more replies)
  0 siblings, 3 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-06  7:52 UTC (permalink / raw)
  To: Gagandeep Singh, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, Nipun Gupta, Hemant Agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

I think we can add support for DIR_ANY.
@Bruce @Jerin Would you please take a look at my proposal?

On 2021/9/6 14:48, Gagandeep Singh wrote:
> 
> 
>> -----Original Message-----
>> From: fengchengwen <fengchengwen@huawei.com>
>> Sent: Saturday, September 4, 2021 7:02 AM
>> To: Gagandeep Singh <G.Singh@nxp.com>; thomas@monjalon.net;
>> ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
>> jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
>> Cc: dev@dpdk.org; mb@smartsharesystems.com; Nipun Gupta
>> <nipun.gupta@nxp.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
>> maxime.coquelin@redhat.com; honnappa.nagarahalli@arm.com;
>> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com;
>> konstantin.ananyev@intel.com; conor.walsh@intel.com
>> Subject: Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library
>> public APIs
>>
>> On 2021/9/3 19:42, Gagandeep Singh wrote:
>>> Hi,
>>>
>>> <snip>
>>>> +
>>>> +/**
>>>> + * @warning
>>>> + * @b EXPERIMENTAL: this API may change without prior notice.
>>>> + *
>>>> + * Close a DMA device.
>>>> + *
>>>> + * The device cannot be restarted after this call.
>>>> + *
>>>> + * @param dev_id
>>>> + *   The identifier of the device.
>>>> + *
>>>> + * @return
>>>> + *   0 on success. Otherwise negative value is returned.
>>>> + */
>>>> +__rte_experimental
>>>> +int
>>>> +rte_dmadev_close(uint16_t dev_id);
>>>> +
>>>> +/**
>>>> + * rte_dma_direction - DMA transfer direction defines.
>>>> + */
>>>> +enum rte_dma_direction {
>>>> +	RTE_DMA_DIR_MEM_TO_MEM,
>>>> +	/**< DMA transfer direction - from memory to memory.
>>>> +	 *
>>>> +	 * @see struct rte_dmadev_vchan_conf::direction
>>>> +	 */
>>>> +	RTE_DMA_DIR_MEM_TO_DEV,
>>>> +	/**< DMA transfer direction - from memory to device.
>>>> +	 * In a typical scenario, the SoCs are installed on host servers as
>>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from
>>>> memory
>>>> +	 * (which is SoCs memory) to device (which is host memory).
>>>> +	 *
>>>> +	 * @see struct rte_dmadev_vchan_conf::direction
>>>> +	 */
>>>> +	RTE_DMA_DIR_DEV_TO_MEM,
>>>> +	/**< DMA transfer direction - from device to memory.
>>>> +	 * In a typical scenario, the SoCs are installed on host servers as
>>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
>>>> +	 * (which is host memory) to memory (which is SoCs memory).
>>>> +	 *
>>>> +	 * @see struct rte_dmadev_vchan_conf::direction
>>>> +	 */
>>>> +	RTE_DMA_DIR_DEV_TO_DEV,
>>>> +	/**< DMA transfer direction - from device to device.
>>>> +	 * In a typical scenario, the SoCs are installed on host servers as
>>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
>>>> +	 * (which is host memory) to the device (which is another host memory).
>>>> +	 *
>>>> +	 * @see struct rte_dmadev_vchan_conf::direction
>>>> +	 */
>>>> +};
>>>> +
>>>> +/**
>>>> ..
>>> The enum rte_dma_direction must have a member RTE_DMA_DIR_ANY for a
>> channel that supports all 4 directions.
>>
>> We've discussed this issue before. The earliest solution was to set up
>> channels to support multiple DIRs, but no hardware/driver actually used this
>> (at least at that time); they (like octeontx2_dma/dpaa) all set up one
>> logical channel to serve a single transfer direction.
>>
>> So, do you have that kind of need for your driver?
>>
> Both DPAA1 and DPAA2 drivers can support ANY direction on a channel, so we would like to have this option as well.
> 
>>
>> If you have a strong need, we'll consider the following options:
>>
>> Once the channel is set up, there are no other parameters to indicate the
>> copy request's transfer direction, so I think it is not enough to define
>> RTE_DMA_DIR_ANY only.
>>
>> Maybe we could add RTE_DMA_OP_xxx macros
>> (RTE_DMA_OP_FLAG_M2M/M2D/D2M/D2D); these macros would be passed as the flags
>> parameter to the enqueue API, so the enqueue API knows which transfer
>> direction the request corresponds to.
>>
>> We can easily extend the existing framework as follows:
>> a. define capability RTE_DMADEV_CAPA_DIR_ANY, which devices that support it
>> could declare.
>> b. define direction macro: RTE_DMA_DIR_ANY
>> c. define dma_op: RTE_DMA_OP_FLAG_DIR_M2M/M2D/D2M/D2D, which will be passed
>> as the flags parameter.
>>
>> A driver which doesn't support this feature simply doesn't declare it; the
>> framework ensures that RTE_DMA_DIR_ANY is not passed down, and the driver
>> can ignore the RTE_DMA_OP_FLAG_DIR_xxx flags in the enqueue API.
>>
>> A driver which supports this feature lets the application create a channel
>> with RTE_DMA_DIR_ANY or RTE_DMA_DIR_MEM_TO_MEM.
>> If created with RTE_DMA_DIR_ANY, the RTE_DMA_OP_FLAG_DIR_xxx flags must be
>> interpreted by the driver.
>> If created with RTE_DMA_DIR_MEM_TO_MEM, the RTE_DMA_OP_FLAG_DIR_xxx flags
>> can be ignored.
>>
> Your design looks ok to me.
> 
>>
>>> <snip>
>>>
>>>
>>> Regards,
>>> Gagan
>>>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-06  1:46             ` Li, Xiaoyun
@ 2021-09-06  8:00               ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-06  8:00 UTC (permalink / raw)
  To: Li, Xiaoyun, Maxime Coquelin, Richardson, Bruce
  Cc: thomas, Yigit, Ferruh, jerinj, jerinjacobk, andrew.rybchenko,
	dev, mb, nipun.gupta, hemant.agrawal, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, Ananyev, Konstantin, Walsh,
	Conor, Xia, Chenbo

On 2021/9/6 9:46, Li, Xiaoyun wrote:
> Hi
> 
>> -----Original Message-----
>> From: fengchengwen <fengchengwen@huawei.com>
>> Sent: Saturday, September 4, 2021 15:02
>> To: Maxime Coquelin <maxime.coquelin@redhat.com>; Richardson, Bruce
>> <bruce.richardson@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>
>> Cc: thomas@monjalon.net; Yigit, Ferruh <ferruh.yigit@intel.com>;
>> jerinj@marvell.com; jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru;
>> dev@dpdk.org; mb@smartsharesystems.com; nipun.gupta@nxp.com;
>> hemant.agrawal@nxp.com; honnappa.nagarahalli@arm.com;
>> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com;
>> Ananyev, Konstantin <konstantin.ananyev@intel.com>; Walsh, Conor
>> <conor.walsh@intel.com>; Xia, Chenbo <chenbo.xia@intel.com>
>> Subject: Re: [PATCH v18 8/8] maintainers: add for dmadev
>>
>> On 2021/9/3 20:59, Maxime Coquelin wrote:
>>> Hi,
>>>
>>> On 9/2/21 3:39 PM, fengchengwen wrote:
>>>> Fix in v19
>>>>
>>>> I think there are many patches waiting for the dmadev framework upstream, so
>>>> could you help review the unreviewed patches (like dma/skeleton and app/test)?
>>>
>>> Thanks for all the work, it looks really promising!
>>>
>>>> Also, we have developed the dma driver for hisilicon, and
>>>> corresponding test app (like examples/vhost and testpmd) is being developed.
>>>>
>>>> examples/vhost: will extend to support the dmadev type.
>>>
>>> I think Sunil has posted RFC for Vhost lib and example, you might want
>>> to have a look to avoid duplicate work.
>>
>> Got it, thanks
>>
>>>
>>>> testpmd: will introduce some dma control commands and the dma forwarding mode.
>>>>          the dma forwarding mode process:
>>>>               // 1st: call rte_eth_rx_burst
>>>>               // 2nd: post the received packet to dma, move data to another packet
>>>>               // 3rd: set the newly copied rte_mbuf header
>>>>               // 4th: free the received packet
>>>>               // 5th: get the dma completed request and associate it with an rte_mbuf
>>>>               // 6th: send the 5th step's rte_mbuf to the nic
>>>>
>>>> @Maxime @Chenbo @Xiaoyun Like to hear your opinion.
> 
> What're some dma control commands? Can you give some examples here?

It is only a plan. Our test team reports that some test points, such as configuring the queue depth, are required.

Based on the current discussion, we are not planning to support this feature, so this thread can be closed.

> 
> And the forwarding mode in testpmd sounds like a functional test to verify the functions of dmadev.
> Why don't you just test in app/test like test_eventdev or test_cryptodev? What's the purpose of this dma forwarding mode since it's just dma-copy from one mbuf to another?
> 
>>>
>>> We might also think of adding async support to Vhost PMD, that would
>>> be another way to test dmadev with testpmd.
> 
> +1 on this. This makes more sense.
> 
>>
>> OK
>>
>>>
>>> Thanks,
>>> Maxime
>>>
>>>>
>>>> On 2021/9/2 19:51, Bruce Richardson wrote:
>>>>> On Thu, Sep 02, 2021 at 06:54:17PM +0800, Chengwen Feng wrote:
>>>>>> This patch adds myself as dmadev's maintainer and updates the release notes.
>>>>>>
>>>>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com> ---
>>>>>
>>>>> Just in case you are doing any further revisions of this patchset,
>>>>> the maintainers entry, and RN entry, generally is added in the first
>>>>> patch, so squash this patch into patch #1.
>>>>> .
>>>>>
>>>>
>>>
>>> .
>>>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev
  2021-09-06  2:03           ` Xia, Chenbo
@ 2021-09-06  8:01             ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-06  8:01 UTC (permalink / raw)
  To: Xia, Chenbo, Maxime Coquelin, Richardson, Bruce, Li, Xiaoyun
  Cc: thomas, Yigit, Ferruh, jerinj, jerinjacobk, andrew.rybchenko,
	dev, mb, nipun.gupta, hemant.agrawal, honnappa.nagarahalli,
	david.marchand, sburla, pkapoor, Ananyev, Konstantin, Walsh,
	Conor

On 2021/9/6 10:03, Xia, Chenbo wrote:
> Hi,
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Friday, September 3, 2021 8:59 PM
>> To: fengchengwen <fengchengwen@huawei.com>; Richardson, Bruce
>> <bruce.richardson@intel.com>; Li, Xiaoyun <xiaoyun.li@intel.com>
>> Cc: thomas@monjalon.net; Yigit, Ferruh <ferruh.yigit@intel.com>;
>> jerinj@marvell.com; jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru;
>> dev@dpdk.org; mb@smartsharesystems.com; nipun.gupta@nxp.com;
>> hemant.agrawal@nxp.com; honnappa.nagarahalli@arm.com;
>> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com; Ananyev,
>> Konstantin <konstantin.ananyev@intel.com>; Walsh, Conor
>> <conor.walsh@intel.com>; Xia, Chenbo <chenbo.xia@intel.com>
>> Subject: Re: [PATCH v18 8/8] maintainers: add for dmadev
>>
>> Hi,
>>
>> On 9/2/21 3:39 PM, fengchengwen wrote:
>>> Fix in v19
>>>
>>> I think there are many patches waiting for the dmadev framework upstream, so
>>> could you help review the unreviewed patches (like dma/skeleton and app/test)?
>>
>> Thanks for all the work, it looks really promising!
>>
>>> Also, we have developed the dma driver for hisilicon, and corresponding test
>>> app (like examples/vhost and testpmd) is being developed.
>>>
>>> examples/vhost: will extend to support the dmadev type.
>>
>> I think Sunil has posted RFC for Vhost lib and example, you might want
>> to have a look to avoid duplicate work.
>>
>>> testpmd: will introduce some dma control commands and the dma forwarding mode.
>>>          the dma forwarding mode process:
>>>               // 1st: call rte_eth_rx_burst
>>>               // 2nd: post the received packet to dma, move data to another packet
>>>               // 3rd: set the newly copied rte_mbuf header
>>>               // 4th: free the received packet
>>>               // 5th: get the dma completed request and associate it with an rte_mbuf
>>>               // 6th: send the 5th step's rte_mbuf to the nic
>>>
>>> @Maxime @Chenbo @Xiaoyun Like to hear your opinion.
>>
>> We might also think of adding async support to Vhost PMD, that would be
>> another way to test dmadev with testpmd.
> 
> +1 for this. I would prefer support in vhost pmd so that it could be tested with
> testpmd.
> 
> About the testpmd patches: if it's for testing the dma device itself, we usually
> create a new app (currently testpmd only tests ethdev). If the vhost pmd also
> needs some user input, just make it a devarg. Does that make sense to you?

Got it, thanks

> 
> Thanks,
> Chenbo
> 
>>
>> Thanks,
>> Maxime
>>
>>>
>>> On 2021/9/2 19:51, Bruce Richardson wrote:
>>>> On Thu, Sep 02, 2021 at 06:54:17PM +0800, Chengwen Feng wrote:
>>>>> This patch adds myself as dmadev's maintainer and updates the release notes.
>>>>>
>>>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com> ---
>>>>
>>>> Just in case you are doing any further revisions of this patchset, the
>>>> maintainers entry, and RN entry, generally is added in the first patch, so
>>>> squash this patch into patch #1.
>>>> .
>>>>
>>>
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-06  7:52           ` fengchengwen
@ 2021-09-06  8:06             ` Jerin Jacob
  2021-09-06  8:08             ` Bruce Richardson
  2021-09-07 12:55             ` fengchengwen
  2 siblings, 0 replies; 339+ messages in thread
From: Jerin Jacob @ 2021-09-06  8:06 UTC (permalink / raw)
  To: fengchengwen
  Cc: Gagandeep Singh, thomas, ferruh.yigit, bruce.richardson, jerinj,
	andrew.rybchenko, dev, mb, Nipun Gupta, Hemant Agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh

On Mon, Sep 6, 2021 at 1:22 PM fengchengwen <fengchengwen@huawei.com> wrote:
>
> I think we can add support for DIR_ANY.
> @Bruce @Jerin Would you please take a look at my proposal?

Since the channel is virtual, it is cheap to avoid any fast-path flags and
keep the current scheme, as at most we will have 4 channels for the
directions.
No strong opinion; if others think that is the better way, I think it is
okay too.


>
> On 2021/9/6 14:48, Gagandeep Singh wrote:
> >
> >
> >> -----Original Message-----
> >> From: fengchengwen <fengchengwen@huawei.com>
> >> Sent: Saturday, September 4, 2021 7:02 AM
> >> To: Gagandeep Singh <G.Singh@nxp.com>; thomas@monjalon.net;
> >> ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
> >> jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
> >> Cc: dev@dpdk.org; mb@smartsharesystems.com; Nipun Gupta
> >> <nipun.gupta@nxp.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> >> maxime.coquelin@redhat.com; honnappa.nagarahalli@arm.com;
> >> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com;
> >> konstantin.ananyev@intel.com; conor.walsh@intel.com
> >> Subject: Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library
> >> public APIs
> >>
> >> On 2021/9/3 19:42, Gagandeep Singh wrote:
> >>> Hi,
> >>>
> >>> <snip>
> >>>> +
> >>>> +/**
> >>>> + * @warning
> >>>> + * @b EXPERIMENTAL: this API may change without prior notice.
> >>>> + *
> >>>> + * Close a DMA device.
> >>>> + *
> >>>> + * The device cannot be restarted after this call.
> >>>> + *
> >>>> + * @param dev_id
> >>>> + *   The identifier of the device.
> >>>> + *
> >>>> + * @return
> >>>> + *   0 on success. Otherwise negative value is returned.
> >>>> + */
> >>>> +__rte_experimental
> >>>> +int
> >>>> +rte_dmadev_close(uint16_t dev_id);
> >>>> +
> >>>> +/**
> >>>> + * rte_dma_direction - DMA transfer direction defines.
> >>>> + */
> >>>> +enum rte_dma_direction {
> >>>> +  RTE_DMA_DIR_MEM_TO_MEM,
> >>>> +  /**< DMA transfer direction - from memory to memory.
> >>>> +   *
> >>>> +   * @see struct rte_dmadev_vchan_conf::direction
> >>>> +   */
> >>>> +  RTE_DMA_DIR_MEM_TO_DEV,
> >>>> +  /**< DMA transfer direction - from memory to device.
> >>>> +   * In a typical scenario, the SoCs are installed on host servers as
> >>>> +   * iNICs through the PCIe interface. In this case, the SoCs works in
> >>>> +   * EP(endpoint) mode, it could initiate a DMA move request from
> >>>> memory
> >>>> +   * (which is SoCs memory) to device (which is host memory).
> >>>> +   *
> >>>> +   * @see struct rte_dmadev_vchan_conf::direction
> >>>> +   */
> >>>> +  RTE_DMA_DIR_DEV_TO_MEM,
> >>>> +  /**< DMA transfer direction - from device to memory.
> >>>> +   * In a typical scenario, the SoCs are installed on host servers as
> >>>> +   * iNICs through the PCIe interface. In this case, the SoCs works in
> >>>> +   * EP(endpoint) mode, it could initiate a DMA move request from device
> >>>> +   * (which is host memory) to memory (which is SoCs memory).
> >>>> +   *
> >>>> +   * @see struct rte_dmadev_vchan_conf::direction
> >>>> +   */
> >>>> +  RTE_DMA_DIR_DEV_TO_DEV,
> >>>> +  /**< DMA transfer direction - from device to device.
> >>>> +   * In a typical scenario, the SoCs are installed on host servers as
> >>>> +   * iNICs through the PCIe interface. In this case, the SoCs works in
> >>>> +   * EP(endpoint) mode, it could initiate a DMA move request from device
> >>>> +   * (which is host memory) to the device (which is another host memory).
> >>>> +   *
> >>>> +   * @see struct rte_dmadev_vchan_conf::direction
> >>>> +   */
> >>>> +};
> >>>> +
> >>>> +/**
> >>>> ..
> >>> The enum rte_dma_direction must have a member RTE_DMA_DIR_ANY for a
> >> channel that supports all 4 directions.
> >>
> >> We've discussed this issue before. The earliest solution was to set up
> >> channels to support multiple DIRs, but no hardware/driver actually used
> >> this (at least at that time); they (like octeontx2_dma/dpaa) all set up
> >> one logical channel to serve a single transfer direction.
> >>
> >> So, do you have that kind of need for your driver?
> >>
> > Both DPAA1 and DPAA2 drivers can support ANY direction on a channel, so we would like to have this option as well.
> >
> >>
> >> If you have a strong need, we'll consider the following options:
> >>
> >> Once the channel is set up, there are no other parameters to indicate the
> >> copy request's transfer direction, so I think it is not enough to define
> >> RTE_DMA_DIR_ANY only.
> >>
> >> Maybe we could add RTE_DMA_OP_xxx macros
> >> (RTE_DMA_OP_FLAG_M2M/M2D/D2M/D2D); these macros would be passed as the
> >> flags parameter to the enqueue API, so the enqueue API knows which
> >> transfer direction the request corresponds to.
> >>
> >> We can easily extend the existing framework as follows:
> >> a. define capability RTE_DMADEV_CAPA_DIR_ANY, which devices that support
> >> it could declare.
> >> b. define direction macro: RTE_DMA_DIR_ANY
> >> c. define dma_op: RTE_DMA_OP_FLAG_DIR_M2M/M2D/D2M/D2D, which will be
> >> passed as the flags parameter.
> >>
> >> A driver which doesn't support this feature simply doesn't declare it;
> >> the framework ensures that RTE_DMA_DIR_ANY is not passed down, and the
> >> driver can ignore the RTE_DMA_OP_FLAG_DIR_xxx flags in the enqueue API.
> >>
> >> A driver which supports this feature lets the application create a
> >> channel with RTE_DMA_DIR_ANY or RTE_DMA_DIR_MEM_TO_MEM.
> >> If created with RTE_DMA_DIR_ANY, the RTE_DMA_OP_FLAG_DIR_xxx flags must
> >> be interpreted by the driver.
> >> If created with RTE_DMA_DIR_MEM_TO_MEM, the RTE_DMA_OP_FLAG_DIR_xxx
> >> flags can be ignored.
> >>
> > Your design looks ok to me.
> >
> >>
> >>> <snip>
> >>>
> >>>
> >>> Regards,
> >>> Gagan
> >>>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-06  7:52           ` fengchengwen
  2021-09-06  8:06             ` Jerin Jacob
@ 2021-09-06  8:08             ` Bruce Richardson
  2021-09-07 12:55             ` fengchengwen
  2 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-09-06  8:08 UTC (permalink / raw)
  To: fengchengwen
  Cc: Gagandeep Singh, thomas, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, Nipun Gupta, Hemant Agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh

On Mon, Sep 06, 2021 at 03:52:21PM +0800, fengchengwen wrote:
> I think we can add support for DIR_ANY.
> @Bruce @Jerin Would you please take a look at my proposal?
> 

I don't have a strong opinion on this. However, isn't one of the reasons we
have virtual channels in the API, rather than HW channels, that this info can
be encoded in the virtual-channel setup? If a HW channel can support multiple
types of copy simultaneously, I thought the original design was to create a
vchan on this HW channel for each copy type needed?
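Concretely, that original design would mean one vchan per copy type on the
same dmadev, roughly as in the sketch below (illustrative only; struct fields
and signatures approximate):

	/* one dmadev backed by one HW channel, one vchan per transfer type */
	struct rte_dmadev_vchan_conf m2m = { .direction = RTE_DMA_DIR_MEM_TO_MEM };
	struct rte_dmadev_vchan_conf m2d = { .direction = RTE_DMA_DIR_MEM_TO_DEV };

	rte_dmadev_vchan_setup(dev_id, 0, &m2m);    /* vchan 0: mem-to-mem */
	rte_dmadev_vchan_setup(dev_id, 1, &m2d);    /* vchan 1: mem-to-dev */

	/* each enqueue goes to the vchan matching its transfer direction */
	rte_dmadev_copy(dev_id, 0, src_mem, dst_mem, len, 0);
	rte_dmadev_copy(dev_id, 1, src_mem, dst_dev, len, 0);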

> On 2021/9/6 14:48, Gagandeep Singh wrote:
> > 
> > 
> >> -----Original Message-----
> >> From: fengchengwen <fengchengwen@huawei.com>
> >> Sent: Saturday, September 4, 2021 7:02 AM
> >> To: Gagandeep Singh <G.Singh@nxp.com>; thomas@monjalon.net;
> >> ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
> >> jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
> >> Cc: dev@dpdk.org; mb@smartsharesystems.com; Nipun Gupta
> >> <nipun.gupta@nxp.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
> >> maxime.coquelin@redhat.com; honnappa.nagarahalli@arm.com;
> >> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com;
> >> konstantin.ananyev@intel.com; conor.walsh@intel.com
> >> Subject: Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library
> >> public APIs
> >>
> >> On 2021/9/3 19:42, Gagandeep Singh wrote:
> >>> Hi,
> >>>
> >>> <snip>
> >>>> +
> >>>> +/**
> >>>> + * @warning
> >>>> + * @b EXPERIMENTAL: this API may change without prior notice.
> >>>> + *
> >>>> + * Close a DMA device.
> >>>> + *
> >>>> + * The device cannot be restarted after this call.
> >>>> + *
> >>>> + * @param dev_id
> >>>> + *   The identifier of the device.
> >>>> + *
> >>>> + * @return
> >>>> + *   0 on success. Otherwise negative value is returned.
> >>>> + */
> >>>> +__rte_experimental
> >>>> +int
> >>>> +rte_dmadev_close(uint16_t dev_id);
> >>>> +
> >>>> +/**
> >>>> + * rte_dma_direction - DMA transfer direction defines.
> >>>> + */
> >>>> +enum rte_dma_direction {
> >>>> +	RTE_DMA_DIR_MEM_TO_MEM,
> >>>> +	/**< DMA transfer direction - from memory to memory.
> >>>> +	 *
> >>>> +	 * @see struct rte_dmadev_vchan_conf::direction
> >>>> +	 */
> >>>> +	RTE_DMA_DIR_MEM_TO_DEV,
> >>>> +	/**< DMA transfer direction - from memory to device.
> >>>> +	 * In a typical scenario, the SoCs are installed on host servers as
> >>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> >>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from
> >>>> memory
> >>>> +	 * (which is SoCs memory) to device (which is host memory).
> >>>> +	 *
> >>>> +	 * @see struct rte_dmadev_vchan_conf::direction
> >>>> +	 */
> >>>> +	RTE_DMA_DIR_DEV_TO_MEM,
> >>>> +	/**< DMA transfer direction - from device to memory.
> >>>> +	 * In a typical scenario, the SoCs are installed on host servers as
> >>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> >>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
> >>>> +	 * (which is host memory) to memory (which is SoCs memory).
> >>>> +	 *
> >>>> +	 * @see struct rte_dmadev_vchan_conf::direction
> >>>> +	 */
> >>>> +	RTE_DMA_DIR_DEV_TO_DEV,
> >>>> +	/**< DMA transfer direction - from device to device.
> >>>> +	 * In a typical scenario, the SoCs are installed on host servers as
> >>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
> >>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
> >>>> +	 * (which is host memory) to the device (which is another host memory).
> >>>> +	 *
> >>>> +	 * @see struct rte_dmadev_vchan_conf::direction
> >>>> +	 */
> >>>> +};
> >>>> +
> >>>> +/**
> >>>> ..
> >>> The enum rte_dma_direction must have a member RTE_DMA_DIR_ANY for a
> >> channel that supports all 4 directions.
> >>
> >> We've discussed this issue before. The earliest solution was to set up
> >> channels to support multiple DIRs, but no hardware/driver actually used
> >> this (at least at that time); they (like octeontx2_dma/dpaa) all set up
> >> one logical channel to serve a single transfer direction.
> >>
> >> So, do you have that kind of need for your driver?
> >>
> > Both DPAA1 and DPAA2 drivers can support ANY direction on a channel, so we would like to have this option as well.
> > 
> >>
> >> If you have a strong need, we'll consider the following options:
> >>
> >> Once the channel is set up, there are no other parameters to indicate the
> >> copy request's transfer direction, so I think it is not enough to define
> >> RTE_DMA_DIR_ANY only.
> >>
> >> Maybe we could add RTE_DMA_OP_xxx macros
> >> (RTE_DMA_OP_FLAG_M2M/M2D/D2M/D2D); these macros would be passed as the
> >> flags parameter to the enqueue API, so the enqueue API knows which
> >> transfer direction the request corresponds to.
> >>
> >> We can easily extend the existing framework as follows:
> >> a. define capability RTE_DMADEV_CAPA_DIR_ANY, which devices that support
> >> it could declare.
> >> b. define direction macro: RTE_DMA_DIR_ANY
> >> c. define dma_op: RTE_DMA_OP_FLAG_DIR_M2M/M2D/D2M/D2D, which will be
> >> passed as the flags parameter.
> >>
> >> A driver which doesn't support this feature simply doesn't declare it;
> >> the framework ensures that RTE_DMA_DIR_ANY is not passed down, and the
> >> driver can ignore the RTE_DMA_OP_FLAG_DIR_xxx flags in the enqueue API.
> >>
> >> A driver which supports this feature lets the application create a
> >> channel with RTE_DMA_DIR_ANY or RTE_DMA_DIR_MEM_TO_MEM.
> >> If created with RTE_DMA_DIR_ANY, the RTE_DMA_OP_FLAG_DIR_xxx flags must
> >> be interpreted by the driver.
> >> If created with RTE_DMA_DIR_MEM_TO_MEM, the RTE_DMA_OP_FLAG_DIR_xxx
> >> flags can be ignored.
> >>
> > Your design looks ok to me.
> > 
> >>
> >>> <snip>
> >>>
> >>>
> >>> Regards,
> >>> Gagan
> >>>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v20 2/7] dmadev: introduce DMA device library internal header
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-09-06 13:35     ` Bruce Richardson
  2021-09-07 13:05       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-09-06 13:35 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

On Sat, Sep 04, 2021 at 06:10:22PM +0800, Chengwen Feng wrote:
> This patch introduces the DMA device library internal header, which contains
> internal data types that are used by the DMA devices in order to expose
> their ops to the class.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> ---
<snip>
> +struct rte_dmadev {
> +	void *dev_private;
> +	/**< PMD-specific private data.
> +	 *
> +	 * - In the primary process, after the dmadev is allocated by
> +	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
> +	 * initialize this field, and copy its value to the 'dev_private'
> +	 * field of 'struct rte_dmadev_data', which is pointed to by the
> +	 * 'data' field.
> +	 *
> +	 * - In a secondary process, the dmadev framework initializes this
> +	 * field by copying from the 'dev_private' field of
> +	 * 'struct rte_dmadev_data', which was initialized by the primary
> +	 * process.
> +	 *
> +	 * @note It's the primary process's responsibility to deinitialize
> +	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
> +	 * device removal stage.
> +	 */
> +	rte_dmadev_copy_t             copy;
> +	rte_dmadev_copy_sg_t          copy_sg;
> +	rte_dmadev_fill_t             fill;
> +	rte_dmadev_submit_t           submit;
> +	rte_dmadev_completed_t        completed;
> +	rte_dmadev_completed_status_t completed_status;
> +	void *reserved_ptr[7]; /**< Reserved for future IO function. */

This is new in this set, I think. I assume that 7 was chosen so that we
have the "data" pointer and the "dev_ops" pointers on the second cacheline
(if 64-byte CLs)? If so, I wonder if we can find a good way to express that
in the code or in the comments?

The simplest - and probably as clear as any - is to split this into
"void *__reserved_cl0" and "void *__reserved_cl1[6]" to show that it is
split across the two cachelines, with the latter having comment:
"Reserve space for future IO functions, while keeping data and dev_ops
pointers on the second cacheline"

If we don't mind using a slightly different type, the magic "6" could be
changed to a computation:
char __reserved_cl1[RTE_CACHELINE_SZ - sizeof(void *) * 2];

However, for simplicity, I think the magic 6 can be kept, and just split
into reserved_cl0 and reserved_cl1 as I suggest above.

/Bruce
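
As an illustration of the suggested layout, assuming 64-bit pointers and
64-byte cachelines (the field names are taken from the patch above; the
reserved-field names and the rte_dmadev_ops type follow this discussion
rather than any merged code):

	struct rte_dmadev {
		/* cacheline 0: dev_private + 6 fast-path pointers + 1 reserved */
		void *dev_private;
		rte_dmadev_copy_t             copy;
		rte_dmadev_copy_sg_t          copy_sg;
		rte_dmadev_fill_t             fill;
		rte_dmadev_submit_t           submit;
		rte_dmadev_completed_t        completed;
		rte_dmadev_completed_status_t completed_status;
		void *__reserved_cl0;
		/* cacheline 1: 6 reserved slots keep 'data' and 'dev_ops' here */
		void *__reserved_cl1[6];
		struct rte_dmadev_data *data;
		const struct rte_dmadev_ops *dev_ops;
	} __rte_cache_aligned;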


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v20 0/7] support dmadev
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
                     ` (6 preceding siblings ...)
  2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 7/7] app/test: add dmadev API test Chengwen Feng
@ 2021-09-06 13:37   ` Bruce Richardson
  7 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-09-06 13:37 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

On Sat, Sep 04, 2021 at 06:10:20PM +0800, Chengwen Feng wrote:
> This patch set contains seven patches for the newly added dmadev.
> 
> Chengwen Feng (7):
>   dmadev: introduce DMA device library public APIs
>   dmadev: introduce DMA device library internal header
>   dmadev: introduce DMA device library PMD header
>   dmadev: introduce DMA device library implementation
>   doc: add DMA device library guide
>   dma/skeleton: introduce skeleton dmadev driver
>   app/test: add dmadev API test
> 
> ---
> v20:
> * delete unnecessary and duplicate include header files.
> * the conf_sz parameter is added to the configure and vchan-setup
>   callbacks of the PMD, this is mainly used to enhance ABI
>   compatibility.
> * the rte_dmadev structure fields are rearranged to reserve more space
>   for I/O functions.
> * fix some ambiguous and unnecessary comments.
> * fix a potential memory leak in the unit test.
> * redefine skeldma_init_once to skeldma_count.
> * suppress rte_dmadev error output when executing the unit test.
Thanks for v20; the changes I've checked look good.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs
  2021-09-06  7:52           ` fengchengwen
  2021-09-06  8:06             ` Jerin Jacob
  2021-09-06  8:08             ` Bruce Richardson
@ 2021-09-07 12:55             ` fengchengwen
  2 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-07 12:55 UTC (permalink / raw)
  To: Gagandeep Singh, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, Nipun Gupta, Hemant Agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

Hi Gagandeep,

Based on the following considerations, it was decided not to support "ANY
direction on a channel".

As we previously analyzed [1], many hardware devices (like dpaa2/octeontx2/Kunpeng)
support multiple directions on a hardware channel.

To allow smooth migration of existing drivers, we settled on the concept of
using a virtual queue to represent each transfer-direction context, and that
concept has persisted to this day.

Although it could be extended along the lines of my proposal, that change would
introduce a new interface model which applications would have to take into
account. If we keep things as they are, applications based on the original
rawdev interface can adapt quickly.

Also, Jerin has made some comments from a performance perspective, which I
agree with.

[1] https://lore.kernel.org/dpdk-dev/c4a0ee30-f7b8-f8a1-463c-8eedaec82aea@huawei.com/

BTW: @Jerin @Bruce thank you for your replies.

Thanks

On 2021/9/6 15:52, fengchengwen wrote:
> I think we can add support for DIR_ANY.
> @Bruce @Jerin Would you please take a look at my proposal?
> 
> On 2021/9/6 14:48, Gagandeep Singh wrote:
>>
>>
>>> -----Original Message-----
>>> From: fengchengwen <fengchengwen@huawei.com>
>>> Sent: Saturday, September 4, 2021 7:02 AM
>>> To: Gagandeep Singh <G.Singh@nxp.com>; thomas@monjalon.net;
>>> ferruh.yigit@intel.com; bruce.richardson@intel.com; jerinj@marvell.com;
>>> jerinjacobk@gmail.com; andrew.rybchenko@oktetlabs.ru
>>> Cc: dev@dpdk.org; mb@smartsharesystems.com; Nipun Gupta
>>> <nipun.gupta@nxp.com>; Hemant Agrawal <hemant.agrawal@nxp.com>;
>>> maxime.coquelin@redhat.com; honnappa.nagarahalli@arm.com;
>>> david.marchand@redhat.com; sburla@marvell.com; pkapoor@marvell.com;
>>> konstantin.ananyev@intel.com; conor.walsh@intel.com
>>> Subject: Re: [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library
>>> public APIs
>>>
>>> On 2021/9/3 19:42, Gagandeep Singh wrote:
>>>> Hi,
>>>>
>>>> <snip>
>>>>> +
>>>>> +/**
>>>>> + * @warning
>>>>> + * @b EXPERIMENTAL: this API may change without prior notice.
>>>>> + *
>>>>> + * Close a DMA device.
>>>>> + *
>>>>> + * The device cannot be restarted after this call.
>>>>> + *
>>>>> + * @param dev_id
>>>>> + *   The identifier of the device.
>>>>> + *
>>>>> + * @return
>>>>> + *   0 on success. Otherwise negative value is returned.
>>>>> + */
>>>>> +__rte_experimental
>>>>> +int
>>>>> +rte_dmadev_close(uint16_t dev_id);
>>>>> +
>>>>> +/**
>>>>> + * rte_dma_direction - DMA transfer direction defines.
>>>>> + */
>>>>> +enum rte_dma_direction {
>>>>> +	RTE_DMA_DIR_MEM_TO_MEM,
>>>>> +	/**< DMA transfer direction - from memory to memory.
>>>>> +	 *
>>>>> +	 * @see struct rte_dmadev_vchan_conf::direction
>>>>> +	 */
>>>>> +	RTE_DMA_DIR_MEM_TO_DEV,
>>>>> +	/**< DMA transfer direction - from memory to device.
>>>>> +	 * In a typical scenario, the SoCs are installed on host servers as
>>>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>>>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from
>>>>> memory
>>>>> +	 * (which is SoCs memory) to device (which is host memory).
>>>>> +	 *
>>>>> +	 * @see struct rte_dmadev_vchan_conf::direction
>>>>> +	 */
>>>>> +	RTE_DMA_DIR_DEV_TO_MEM,
>>>>> +	/**< DMA transfer direction - from device to memory.
>>>>> +	 * In a typical scenario, the SoCs are installed on host servers as
>>>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>>>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
>>>>> +	 * (which is host memory) to memory (which is SoCs memory).
>>>>> +	 *
>>>>> +	 * @see struct rte_dmadev_vchan_conf::direction
>>>>> +	 */
>>>>> +	RTE_DMA_DIR_DEV_TO_DEV,
>>>>> +	/**< DMA transfer direction - from device to device.
>>>>> +	 * In a typical scenario, the SoCs are installed on host servers as
>>>>> +	 * iNICs through the PCIe interface. In this case, the SoCs works in
>>>>> +	 * EP(endpoint) mode, it could initiate a DMA move request from device
>>>>> +	 * (which is host memory) to the device (which is another host memory).
>>>>> +	 *
>>>>> +	 * @see struct rte_dmadev_vchan_conf::direction
>>>>> +	 */
>>>>> +};
>>>>> +
>>>>> +/**
>>>>> ..
>>>> The enum rte_dma_direction must have a member RTE_DMA_DIR_ANY for a
>>> channel that supports all 4 directions.
>>>
>>> We've discussed this issue before. The earliest solution was to set up
>>> channels to support multiple DIRs, but no hardware/driver actually used
>>> this (at least at that time); they (like octeontx2_dma/dpaa) all set up
>>> one logical channel to serve a single transfer direction.
>>>
>>> So, do you have that kind of need for your driver?
>>>
>> Both DPAA1 and DPAA2 drivers can support ANY direction on a channel, so we would like to have this option as well.
>>
>>>
>>> If you have a strong need, we'll consider the following options:
>>>
>>> Once the channel is set up, there are no other parameters to indicate the
>>> copy request's transfer direction, so I think it is not enough to define
>>> RTE_DMA_DIR_ANY only.
>>>
>>> Maybe we could add RTE_DMA_OP_xxx macros
>>> (RTE_DMA_OP_FLAG_M2M/M2D/D2M/D2D); these macros would be passed as the
>>> flags parameter to the enqueue API, so the enqueue API knows which
>>> transfer direction the request corresponds to.
>>>
>>> We can easily extend the existing framework as follows:
>>> a. define capability RTE_DMADEV_CAPA_DIR_ANY, which devices that support
>>> it could declare.
>>> b. define direction macro: RTE_DMA_DIR_ANY
>>> c. define dma_op: RTE_DMA_OP_FLAG_DIR_M2M/M2D/D2M/D2D, which will be
>>> passed as the flags parameter.
>>>
>>> A driver which doesn't support this feature simply doesn't declare it;
>>> the framework ensures that RTE_DMA_DIR_ANY is not passed down, and the
>>> driver can ignore the RTE_DMA_OP_FLAG_DIR_xxx flags in the enqueue API.
>>>
>>> A driver which supports this feature lets the application create a
>>> channel with RTE_DMA_DIR_ANY or RTE_DMA_DIR_MEM_TO_MEM.
>>> If created with RTE_DMA_DIR_ANY, the RTE_DMA_OP_FLAG_DIR_xxx flags must
>>> be interpreted by the driver.
>>> If created with RTE_DMA_DIR_MEM_TO_MEM, the RTE_DMA_OP_FLAG_DIR_xxx
>>> flags can be ignored.
>>>
>> Your design looks ok to me.
>>
>>>
>>>> <snip>
>>>>
>>>>
>>>> Regards,
>>>> Gagan
>>>>
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v21 0/7] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (23 preceding siblings ...)
  2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
@ 2021-09-07 12:56 ` Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
                     ` (6 more replies)
  2021-09-16  3:41 ` [dpdk-dev] [PATCH v22 0/5] support dmadev Chengwen Feng
                   ` (4 subsequent siblings)
  29 siblings, 7 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-07 12:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch set contains seven patches for the newly added dmadev.

Chengwen Feng (7):
  dmadev: introduce DMA device library public APIs
  dmadev: introduce DMA device library internal header
  dmadev: introduce DMA device library PMD header
  dmadev: introduce DMA device library implementation
  doc: add DMA device library guide
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test

---
v21:
* add comment for reserved fields of struct rte_dmadev.
v20:
* delete unnecessary and duplicate include header files.
* the conf_sz parameter is added to the configure and vchan-setup
  callbacks of the PMD, this is mainly used to enhance ABI
  compatibility.
* the rte_dmadev structure fields are rearranged to reserve more space
  for I/O functions.
* fix some ambiguous and unnecessary comments.
* fix a potential memory leak in the unit test.
* redefine skeldma_init_once to skeldma_count.
* suppress rte_dmadev error output when executing the unit test.
v19:
* squash maintainer patch to patch #1.
v18:
* RTE_DMA_STATUS_* add BUS_READ/WRITE_ERR, PAGE_FAULT.
* rte_dmadev dataplane APIs check dev_started when debug is enabled.
* rte_dmadev_start/vchan_setup check that the device is configured.
* rte_dmadev_dump supports formatting capability names.
* optimized the comments of rte_dmadev.
* fix skeldma_copy always returning zero when enqueue is successful.
* log encapsulation macros add newline characters.
* test_dmadev_api supports a rte_dmadev_dump() unit test.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   43 +
 app/test/test_dmadev_api.c             |  543 ++++++++++++
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/prog_guide/dmadev.rst       |  125 +++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    5 +
 drivers/dma/meson.build                |   11 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  594 ++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   61 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  607 ++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1047 ++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  187 +++++
 lib/dmadev/rte_dmadev_pmd.h            |   72 ++
 lib/dmadev/version.map                 |   35 +
 lib/meson.build                        |    1 +
 24 files changed, 3649 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
@ 2021-09-07 12:56   ` Chengwen Feng
  2021-09-09 10:33     ` Thomas Monjalon
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
                     ` (5 subsequent siblings)
  6 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-09-07 12:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' public APIs, which expose generic
operations that enable configuration and I/O with DMA devices.

Maintainers update is also included in this patch.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   4 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/rel_notes/release_21_11.rst |   5 +
 lib/dmadev/meson.build                 |   4 +
 lib/dmadev/rte_dmadev.h                | 951 +++++++++++++++++++++++++
 lib/dmadev/version.map                 |  24 +
 lib/meson.build                        |   1 +
 8 files changed, 991 insertions(+)
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 1e0d303394..9885cc56b7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
 F: app/test/test_rawdev.c
 F: doc/guides/prog_guide/rawdev.rst
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+
 
 Memory Pool Drivers
 -------------------
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107a03..ce08250639 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -27,6 +27,7 @@ The public API headers are grouped by topics:
   [event_timer_adapter]    (@ref rte_event_timer_adapter.h),
   [event_crypto_adapter]   (@ref rte_event_crypto_adapter.h),
   [rawdev]             (@ref rte_rawdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [metrics]            (@ref rte_metrics.h),
   [bitrate]            (@ref rte_bitrate.h),
   [latency]            (@ref rte_latencystats.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a0195c6..a44a92b5fe 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -34,6 +34,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/cmdline \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/distributor \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 675b573834..3562822b3d 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -62,6 +62,11 @@ New Features
   * Added bus-level parsing of the devargs syntax.
   * Kept compatibility with the legacy syntax as parsing fallback.
 
+* **Added dmadev library support.**
+
+  The dmadev library provides a DMA device framework for management and
+  provision of hardware and software DMA devices.
+
 
 Removed Items
 -------------
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000000..6d5bd85373
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+headers = files('rte_dmadev.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000000..76d71615eb
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ * Copyright(c) 2021 Marvell International Ltd.
+ * Copyright(c) 2021 SmartShare Systems.
+ */
+
+#ifndef _RTE_DMADEV_H_
+#define _RTE_DMADEV_H_
+
+/**
+ * @file rte_dmadev.h
+ *
+ * RTE DMA (Direct Memory Access) device APIs.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW-DMA-channel |               | HW-DMA-channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW-DMA-Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * A dmadev can create multiple virtual DMA channels; each virtual DMA
+ * channel represents a different transfer context. A DMA operation request
+ * must be submitted to a virtual DMA channel. E.g. an application could
+ * create virtual DMA channel 0 for the memory-to-memory transfer scenario,
+ * and virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dmadev_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dmadev_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dmadev_configure()
+ *     - rte_dmadev_vchan_setup()
+ *     - rte_dmadev_start()
+ *
+ * Then, the application can invoke dataplane APIs to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
+ * rte_dmadev_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dmadev_start() again. The dataplane APIs should not be
+ * invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the
+ * rte_dmadev_close() function.
+ *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dmadev_copy()
+ *     - rte_dmadev_copy_sg()
+ *     - rte_dmadev_fill()
+ *     - rte_dmadev_submit()
+ *
+ * These APIs can work with different virtual DMA channels, which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a uint16_t ring_idx is
+ * returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue a doorbell to the hardware; alternatively,
+ * the flags parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT)
+ * can do the same work.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dmadev_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dmadev_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note The two completed APIs also support returning the last completed
+ * operation's ring_idx.
+ * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
+ * the application does not need to invoke the above two completed APIs.
+ *
+ * The ring_idx returned by the enqueue APIs (e.g. rte_dmadev_copy(),
+ * rte_dmadev_fill()) follows these rules:
+ *     - The ring_idx values for each virtual DMA channel are independent.
+ *     - For a given virtual DMA channel, the ring_idx is monotonically
+ *       incremented; when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring, as the sketch
+ *       below shows.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the returned ring_idx is 0
+ *     - step-3: enqueue a copy operation again, the returned ring_idx is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the returned ring_idx is 65535
+ *     - step-x+1: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
+ *
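 * For instance, an application might use the ring_idx to index a
 * power-of-two sized metadata ring (illustrative sketch only; 'struct
 * op_meta' and 'cur_meta' are hypothetical application objects):
+ *
+ * \code{.c}
+ * #define META_RING_SZ 1024 // power of two, >= ring depth
+ * struct op_meta meta[META_RING_SZ];
+ * int idx = rte_dmadev_copy(dev_id, 0, src, dst, len, 0);
+ * if (idx >= 0)
+ *     meta[idx & (META_RING_SZ - 1)] = cur_meta;
+ * \endcode
+ *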
+ * The DMA operation addresses used in the enqueue APIs (i.e. rte_dmadev_copy(),
+ * rte_dmadev_copy_sg(), rte_dmadev_fill()) are defined as rte_iova_t. The
+ * dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMADEV_CAPA_SVA), the memory
+ * address can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
+ * By default, all the functions of the dmadev API exported by a PMD are
+ * lock-free functions, which assume they are not invoked in parallel on
+ * different logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation, because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
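 * A minimal end-to-end usage sketch (illustrative only; error handling is
 * omitted, src_iova/dst_iova/len are assumed valid, and a single
 * mem-to-mem virtual DMA channel is used):
+ *
+ * \code{.c}
+ * struct rte_dmadev_conf conf = { .nb_vchans = 1 };
+ * struct rte_dmadev_vchan_conf vconf = {
+ *     .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *     .nb_desc = 1024,
+ * };
+ * uint16_t last_idx;
+ * bool has_error;
+ *
+ * rte_dmadev_configure(dev_id, &conf);
+ * rte_dmadev_vchan_setup(dev_id, 0, &vconf);
+ * rte_dmadev_start(dev_id);
+ *
+ * rte_dmadev_copy(dev_id, 0, src_iova, dst_iova, len,
+ *                 RTE_DMA_OP_FLAG_SUBMIT);
+ * while (rte_dmadev_completed(dev_id, 0, 1, &last_idx, &has_error) == 0)
+ *     ; // poll until the copy completes
+ * \endcode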
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int
+rte_dmadev_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the given device index refers to a valid DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - true if the device index is valid, false otherwise.
+ */
+__rte_experimental
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_count(void);
+
+/* Enumerates DMA device capabilities. */
+#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
+/**< DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_MEM_TO_DEV	(1ull << 1)
+/**< DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_MEM	(1ull << 2)
+/**< DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_DEV_TO_DEV	(1ull << 3)
+/**< DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ * @see struct rte_dmadev_port_param::port_type
+ */
+
+#define RTE_DMADEV_CAPA_SVA		(1ull << 4)
+/**< DMA device supports SVA, which allows using VA as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, e.g.
+ * memory from rte_malloc(), rte_memzone(), malloc() or the stack.
+ * If the device does not support SVA, the application must pass an IOVA
+ * address, e.g. one obtained from rte_malloc() or rte_memzone().
+ *
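 * For example (illustrative sketch; 'buf' comes from rte_malloc() and
 * 'info' is assumed to have been filled by rte_dmadev_info_get()):
+ *
+ * \code{.c}
+ * void *buf = rte_malloc(NULL, len, 0);
+ * rte_iova_t addr = (info.dev_capa & RTE_DMADEV_CAPA_SVA) ?
+ *                   (rte_iova_t)(uintptr_t)buf : rte_malloc_virt2iova(buf);
+ * \endcode
+ *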
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_SILENT		(1ull << 5)
+/**< DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dmadev_completed*() APIs.
+ *
+ * @see struct rte_dmadev_conf::silent_mode
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY	(1ull << 32)
+/**< DMA device supports copy ops.
+ * The ops capabilities start at bit index 32, leaving a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_COPY_SG	(1ull << 33)
+/**< DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+#define RTE_DMADEV_CAPA_OPS_FILL	(1ull << 34)
+/**< DMA device supports fill ops.
+ *
+ * @see struct rte_dmadev_info::dev_capa
+ */
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ */
+struct rte_dmadev_info {
+	struct rte_device *device; /**< Generic Device information. */
+	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
+	uint16_t max_vchans;
+	/**< Maximum number of virtual DMA channels supported. */
+	uint16_t max_desc;
+	/**< Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/**< Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_sges;
+	/**< Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support the COPY_SG capability, this value
+	 * can be zero.
+	 * If the device supports the COPY_SG capability, then the
+	 * rte_dmadev_copy_sg() parameters nb_src/nb_dst must not exceed this
+	 * value.
+	 */
+	uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dmadev_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ */
+struct rte_dmadev_conf {
+	uint16_t nb_vchans;
+	/**< The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dmadev_info obtained from rte_dmadev_info_get().
+	 */
+	bool enable_silent;
+	/**< Indicates whether to enable silent mode.
+	 * false: default mode; true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMADEV_CAPA_SILENT
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked before any other function in the API. It can
+ * also be re-invoked when a device is in the stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dmadev_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_start(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dmadev_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stop(uint16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_close(uint16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ */
+enum rte_dma_direction {
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/**< DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/**< DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (i.e. SoC memory) to device (i.e. host memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/**< DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from device
+	 * (i.e. host memory) to memory (i.e. SoC memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+	/**< DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (e.g. one host's memory) to another (e.g. another host's
+	 * memory).
+	 *
+	 * @see struct rte_dmadev_vchan_conf::direction
+	 */
+};
+
+/**
+ * enum rte_dmadev_port_type - DMA access port type defines.
+ *
+ * @see struct rte_dmadev_port_param::port_type
+ */
+enum rte_dmadev_port_type {
+	RTE_DMADEV_PORT_NONE,
+	RTE_DMADEV_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dmadev_vchan_conf::src_port
+ * @see struct rte_dmadev_vchan_conf::dst_port
+ */
+struct rte_dmadev_port_param {
+	enum rte_dmadev_port_type port_type;
+	/**< The device access port type.
+	 * @see enum rte_dmadev_port_type
+	 */
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA engine wants to access the memory of host A, the
+		 * access can be initiated by PF-1 in core0, or by VF-0 of
+		 * PF-0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
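+		 *
+		 * For the diagram above, accessing host A memory through
+		 * PF-1 of core0 might be described as (illustrative sketch
+		 * only):
+		 *
+		 * \code{.c}
+		 * struct rte_dmadev_port_param port = {
+		 *     .port_type = RTE_DMADEV_PORT_PCIE,
+		 *     .pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+		 * };
+		 * \endcode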
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			uint64_t pasid : 20;
+			/**< The PASID field in the TLP packet. */
+			uint64_t attr : 3;
+			/**< The attributes field in the TLP packet. */
+			uint64_t ph : 2;
+			/**< The processing hint field in the TLP packet. */
+			uint64_t st : 16;
+			/**< The steering tag field in the TLP packet. */
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ */
+struct rte_dmadev_vchan_conf {
+	enum rte_dma_direction direction;
+	/**< Transfer direction
+	 * @see enum rte_dma_direction
+	 */
+	uint16_t nb_desc;
+	/**< Number of descriptors for the virtual DMA channel. */
+	struct rte_dmadev_port_param src_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+	struct rte_dmadev_port_param dst_port;
+	/**< 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 * @see struct rte_dmadev_port_param
+	 */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dmadev_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dmadev_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ */
+struct rte_dmadev_stats {
+	uint64_t submitted;
+	/**< Count of operations which were submitted to hardware. */
+	uint64_t completed;
+	/**< Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t errors;
+	/**< Count of operations which failed to complete. */
+};
+
+#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
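 * For example (illustrative sketch only):
+ *
+ * \code{.c}
+ * struct rte_dmadev_stats stats;
+ * if (rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats) == 0)
+ *     printf("submitted=%" PRIu64 " completed=%" PRIu64 " errors=%" PRIu64
+ *            "\n", stats.submitted, stats.completed, stats.errors);
+ * \endcode
+ *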
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMADEV_ALL_VCHAN, it means all virtual DMA channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dmadev_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMADEV_ALL_VCHAN, it means all virtual DMA channels.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f);
+
+/**
+ * rte_dma_status_code - DMA transfer result status code defines.
+ */
+enum rte_dma_status_code {
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/**< The operation completed successfully. */
+	RTE_DMA_STATUS_USER_ABORT,
+	/**< The operation failed to complete due to abort by the user.
+	 * This is mainly used when processing dev_stop: the user can modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job), allowing outstanding requests to complete as much as possible
+	 * and so reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/**< The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed, though; report the status of the not-attempted jobs
+	 * before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/**< The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/**< The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/**< The operation failed to complete due to an invalid source or
+	 * destination address; covers the case where only an address error is
+	 * known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/**< The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/**< The operation failed to complete due to an invalid opcode.
+	 * A DMA descriptor may have multiple formats, distinguished by the
+	 * opcode field.
+	 */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/**< The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/**< The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/**< The operation failed to complete due to a bus error; covers the
+	 * case where only a bus error is known, but not in which direction it
+	 * occurred.
+	 */
+	RTE_DMA_STATUS_DATA_POISION,
+	/**< The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/**< The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/**< The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device,
+	 * device-to-memory, or device-to-device transfer scenarios.
+	 */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/**< The operation failed to complete due to a page fault during
+	 * address lookup.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+	/**< The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+};
+
+/**
+ * A structure used to hold a scatter-gather DMA operation request entry.
+ */
+struct rte_dmadev_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+/* DMA flags to augment operation preparation. */
+#define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
+/**< DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the DMA HW works in order (i.e. it has an implicit fence between
+ * operations), this flag may be a no-op.
+ *
+ * @see rte_dmadev_copy()
+ * @see rte_dmadev_copy_sg()
+ * @see rte_dmadev_fill()
+ */
+
+#define RTE_DMA_OP_FLAG_SUBMIT	(1ull << 1)
+/**< DMA submit flag.
+ * It means that a doorbell is issued to the hardware after the operation with
+ * this flag is enqueued.
+ */
+
+#define RTE_DMA_OP_FLAG_LLC	(1ull << 2)
+/**< Hint to write data to low-level cache.
+ * Used for performance optimization. This is just a hint and there is no
+ * capability bit for it; the driver should not return an error if this flag
+ * is set.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, a doorbell is triggered to begin
+ * this operation; otherwise no doorbell is triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+		uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, a
+ * doorbell is triggered to begin this operation; otherwise no doorbell is
+ * triggered.
+ *
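 * For example, gathering two source segments into one destination buffer
 * (illustrative sketch; src_iova1/src_iova2/dst_iova are assumed valid
 * IOVAs):
+ *
+ * \code{.c}
+ * struct rte_dmadev_sge src[2] = {
+ *     { .addr = src_iova1, .length = 64 },
+ *     { .addr = src_iova2, .length = 128 },
+ * };
+ * struct rte_dmadev_sge dst[1] = { { .addr = dst_iova, .length = 192 } };
+ * rte_dmadev_copy_sg(dev_id, 0, src, dst, 2, 1, RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode
+ *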
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dmadev_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
+		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		   uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, a doorbell is triggered to begin
+ * this operation; otherwise no doorbell is triggered.
+ *
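 * For example, zeroing a buffer by filling it with a 0 pattern
 * (illustrative sketch; dst_iova/len are assumed valid):
+ *
+ * \code{.c}
+ * rte_dmadev_fill(dev_id, 0, 0, dst_iova, len, RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode
+ *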
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+int
+rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
+		rte_iova_t dst, uint32_t length, uint64_t flags);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dmadev_copy/fill().
+ *
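 * For example, batching two copies behind a single doorbell (illustrative
 * sketch; the addresses and lengths are assumed valid):
+ *
+ * \code{.c}
+ * rte_dmadev_copy(dev_id, 0, src1, dst1, len1, 0);
+ * rte_dmadev_copy(dev_id, 0, src2, dst2, len2, 0);
+ * rte_dmadev_submit(dev_id, 0); // one doorbell for both copies
+ * \endcode
+ *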
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
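 * A typical poll might look like (illustrative sketch only):
+ *
+ * \code{.c}
+ * uint16_t last_idx, n;
+ * bool has_error;
+ * n = rte_dmadev_completed(dev_id, 0, 32, &last_idx, &has_error);
+ * // n operations succeeded; if has_error is set, use
+ * // rte_dmadev_completed_status() to inspect the failures
+ * \endcode
+ *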
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there were transfer errors.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		     uint16_t *last_idx, bool *has_error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed, whether they
+ * succeeded or failed.
+ *
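 * A drain loop might look like (illustrative sketch; BURST is an
 * application-chosen constant and handle_error() a hypothetical handler):
+ *
+ * \code{.c}
+ * enum rte_dma_status_code status[BURST];
+ * uint16_t last_idx, i;
+ * uint16_t n = rte_dmadev_completed_status(dev_id, 0, BURST, &last_idx,
+ *                                          status);
+ * for (i = 0; i < n; i++)
+ *     if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *         handle_error(status[i]);
+ * \endcode
+ *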
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (say, n), then n values in the
+ *   status array are also set.
+ */
+__rte_experimental
+uint16_t
+rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
+			    const uint16_t nb_cpls, uint16_t *last_idx,
+			    enum rte_dma_status_code *status);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000000..2e37882364
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,24 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dmadev_close;
+	rte_dmadev_completed;
+	rte_dmadev_completed_status;
+	rte_dmadev_configure;
+	rte_dmadev_copy;
+	rte_dmadev_copy_sg;
+	rte_dmadev_count;
+	rte_dmadev_dump;
+	rte_dmadev_fill;
+	rte_dmadev_get_dev_id;
+	rte_dmadev_info_get;
+	rte_dmadev_is_valid_dev;
+	rte_dmadev_start;
+	rte_dmadev_stats_get;
+	rte_dmadev_stats_reset;
+	rte_dmadev_stop;
+	rte_dmadev_submit;
+	rte_dmadev_vchan_setup;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4323..a542c238d2 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -44,6 +44,7 @@ libraries = [
         'power',
         'pdump',
         'rawdev',
+        'dmadev',
         'regexdev',
         'rib',
         'reorder',
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v21 2/7] dmadev: introduce DMA device library internal header
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-09-07 12:56   ` Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-07 12:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch introduces the DMA device library internal header, which contains
internal data types that are used by the DMA devices in order to expose
their ops to the class.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev_core.h | 185 +++++++++++++++++++++++++++++++++++
 2 files changed, 186 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 6d5bd85373..f421ec1909 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -2,3 +2,4 @@
 # Copyright(c) 2021 HiSilicon Limited.
 
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000000..cbf5e88621
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#ifndef _RTE_DMADEV_CORE_H_
+#define _RTE_DMADEV_CORE_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+struct rte_dmadev;
+
+typedef int (*rte_dmadev_info_get_t)(const struct rte_dmadev *dev,
+				     struct rte_dmadev_info *dev_info,
+				     uint32_t info_sz);
+/**< @internal Used to get device information of a device. */
+
+typedef int (*rte_dmadev_configure_t)(struct rte_dmadev *dev,
+				      const struct rte_dmadev_conf *dev_conf,
+				      uint32_t conf_sz);
+/**< @internal Used to configure a device. */
+
+typedef int (*rte_dmadev_start_t)(struct rte_dmadev *dev);
+/**< @internal Used to start a configured device. */
+
+typedef int (*rte_dmadev_stop_t)(struct rte_dmadev *dev);
+/**< @internal Used to stop a configured device. */
+
+typedef int (*rte_dmadev_close_t)(struct rte_dmadev *dev);
+/**< @internal Used to close a configured device. */
+
+typedef int (*rte_dmadev_vchan_setup_t)(struct rte_dmadev *dev, uint16_t vchan,
+				const struct rte_dmadev_vchan_conf *conf,
+				uint32_t conf_sz);
+/**< @internal Used to allocate and set up a virtual DMA channel. */
+
+typedef int (*rte_dmadev_stats_get_t)(const struct rte_dmadev *dev,
+			uint16_t vchan, struct rte_dmadev_stats *stats,
+			uint32_t stats_sz);
+/**< @internal Used to retrieve basic statistics. */
+
+typedef int (*rte_dmadev_stats_reset_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to reset basic statistics. */
+
+typedef int (*rte_dmadev_dump_t)(const struct rte_dmadev *dev, FILE *f);
+/**< @internal Used to dump internal information. */
+
+typedef int (*rte_dmadev_copy_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 rte_iova_t src, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a copy operation. */
+
+typedef int (*rte_dmadev_copy_sg_t)(struct rte_dmadev *dev, uint16_t vchan,
+				    const struct rte_dmadev_sge *src,
+				    const struct rte_dmadev_sge *dst,
+				    uint16_t nb_src, uint16_t nb_dst,
+				    uint64_t flags);
+/**< @internal Used to enqueue a scatter-gather list copy operation. */
+
+typedef int (*rte_dmadev_fill_t)(struct rte_dmadev *dev, uint16_t vchan,
+				 uint64_t pattern, rte_iova_t dst,
+				 uint32_t length, uint64_t flags);
+/**< @internal Used to enqueue a fill operation. */
+
+typedef int (*rte_dmadev_submit_t)(struct rte_dmadev *dev, uint16_t vchan);
+/**< @internal Used to trigger hardware to begin working. */
+
+typedef uint16_t (*rte_dmadev_completed_t)(struct rte_dmadev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+/**< @internal Used to return the number of successfully completed operations. */
+
+typedef uint16_t (*rte_dmadev_completed_status_t)(struct rte_dmadev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+/**< @internal Used to return number of completed operations. */
+
+/**
+ * Possible states of a DMA device.
+ */
+enum rte_dmadev_state {
+	RTE_DMADEV_UNUSED = 0,
+	/**< Device is unused before being probed. */
+	RTE_DMADEV_ATTACHED,
+	/**< Device is attached when allocated in probing. */
+};
+
+/**
+ * DMA device operations function pointer table
+ */
+struct rte_dmadev_ops {
+	rte_dmadev_info_get_t       dev_info_get;
+	rte_dmadev_configure_t      dev_configure;
+	rte_dmadev_start_t          dev_start;
+	rte_dmadev_stop_t           dev_stop;
+	rte_dmadev_close_t          dev_close;
+
+	rte_dmadev_vchan_setup_t    vchan_setup;
+
+	rte_dmadev_stats_get_t      stats_get;
+	rte_dmadev_stats_reset_t    stats_reset;
+
+	rte_dmadev_dump_t           dev_dump;
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_dmadev_data {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dmadev'
+	 * from the primary process; it is used by the secondary process to
+	 * get the dev_private information.
+	 */
+	uint16_t dev_id; /**< Device [external] identifier. */
+	char dev_name[RTE_DMADEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	struct rte_dmadev_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointer
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dmadev {
+	void *dev_private;
+	/**< PMD-specific private data.
+	 *
+	 * - If this is the primary process, after the dmadev is allocated by
+	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
+	 * initialize this field and copy its value to the 'dev_private'
+	 * field of 'struct rte_dmadev_data', which is pointed to by the
+	 * 'data' field.
+	 *
+	 * - If this is a secondary process, the dmadev framework initializes
+	 * this field by copying it from the 'dev_private' field of 'struct
+	 * rte_dmadev_data', which was initialized by the primary process.
+	 *
+	 * @note It is the primary process's responsibility to deinitialize
+	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
+	 * device removal stage.
+	 */
+	rte_dmadev_copy_t             copy;
+	rte_dmadev_copy_sg_t          copy_sg;
+	rte_dmadev_fill_t             fill;
+	rte_dmadev_submit_t           submit;
+	rte_dmadev_completed_t        completed;
+	rte_dmadev_completed_status_t completed_status;
+	void *reserved_cl0;
+	void *reserved_cl1[6];
+	/**< Reserve space for future IO functions, while keeping data and
+	 * dev_ops pointers on the second cacheline.
+	 */
+	struct rte_dmadev_data *data; /**< Pointer to device data. */
+	const struct rte_dmadev_ops *dev_ops; /**< Functions exported by PMD. */
+	struct rte_device *device;
+	/**< Device info supplied during device initialization. */
+	enum rte_dmadev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+#endif /* _RTE_DMADEV_CORE_H_ */
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v21 3/7] dmadev: introduce DMA device library PMD header
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
@ 2021-09-07 12:56   ` Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-07 12:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch introduces the DMA device library PMD header, which provides
driver-facing APIs for a DMA device.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 lib/dmadev/meson.build      |  1 +
 lib/dmadev/rte_dmadev.h     |  2 ++
 lib/dmadev/rte_dmadev_pmd.h | 72 +++++++++++++++++++++++++++++++++++++
 lib/dmadev/version.map      | 10 ++++++
 4 files changed, 85 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h

diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f421ec1909..833baf7d54 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,3 +3,4 @@
 
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 76d71615eb..c8dd0009f5 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -730,6 +730,8 @@ struct rte_dmadev_sge {
 	uint32_t length; /**< The DMA operation length. */
 };
 
+#include "rte_dmadev_core.h"
+
 /* DMA flags to augment operation preparation. */
 #define RTE_DMA_OP_FLAG_FENCE	(1ull << 0)
 /**< DMA fence flag.
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000000..45141f9dc1
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef _RTE_DMADEV_PMD_H_
+#define _RTE_DMADEV_PMD_H_
+
+/**
+ * @file
+ *
+ * RTE DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
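 * A typical PMD probe path might use it as follows (illustrative sketch;
 * 'priv' and 'my_pmd_ops' are hypothetical driver objects):
+ *
+ * \code{.c}
+ * struct rte_dmadev *dev = rte_dmadev_pmd_allocate(name);
+ * if (dev == NULL)
+ *     return -ENOMEM;
+ * dev->dev_private = priv;
+ * dev->dev_ops = &my_pmd_ops;
+ * \endcode
+ *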
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param dev
+ *   Device to be released.
+ *
+ * @return
+ *   - 0 on success, negative on error
+ */
+__rte_internal
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev);
+
+/**
+ * @internal
+ * Return the DMA device based on the device name.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DMADEV_PMD_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 2e37882364..d027eeac97 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -22,3 +22,13 @@ EXPERIMENTAL {
 
 	local: *;
 };
+
+INTERNAL {
+	global:
+
+	rte_dmadev_get_device_by_name;
+	rte_dmadev_pmd_allocate;
+	rte_dmadev_pmd_release;
+
+	local: *;
+};
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
@ 2021-09-07 12:56   ` Chengwen Feng
  2021-09-08  9:54     ` Walsh, Conor
  2021-09-15 13:51     ` Kevin Laatz
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 5/7] doc: add DMA device library guide Chengwen Feng
                     ` (2 subsequent siblings)
  6 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-07 12:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch introduces the DMA device library implementation, which includes
configuration and I/O with the DMA devices.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 config/rte_config.h          |   3 +
 lib/dmadev/meson.build       |   1 +
 lib/dmadev/rte_dmadev.c      | 607 +++++++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h      | 118 ++++++-
 lib/dmadev/rte_dmadev_core.h |   2 +
 lib/dmadev/version.map       |   1 +
 6 files changed, 720 insertions(+), 12 deletions(-)
 create mode 100644 lib/dmadev/rte_dmadev.c

diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c07d..331a431819 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -81,6 +81,9 @@
 /* rawdev defines */
 #define RTE_RAWDEV_MAX_DEVS 64
 
+/* dmadev defines */
+#define RTE_DMADEV_MAX_DEVS 64
+
 /* ip_fragmentation defines */
 #define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
 #undef RTE_LIBRTE_IP_FRAG_TBL_STAT
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index 833baf7d54..d2fc85e8c7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2021 HiSilicon Limited.
 
+sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
 indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000000..ee8db9aaca
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,607 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <inttypes.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
+
+static const char *mz_rte_dmadev_data = "rte_dmadev_data";
+/* Shared memory between primary and secondary processes. */
+static struct {
+	struct rte_dmadev_data data[RTE_DMADEV_MAX_DEVS];
+} *dmadev_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dmadev_logtype, INFO);
+#define RTE_DMADEV_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rte_dmadev_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Macros to check for valid device id */
+#define RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dmadev_is_valid_dev(dev_id)) { \
+		RTE_DMADEV_LOG(ERR, "Invalid dev_id=%u", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+static int
+dmadev_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMADEV_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DMADEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMADEV_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DMADEV_NAME_MAX_LEN) {
+		RTE_DMADEV_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static uint16_t
+dmadev_find_free_dev(void)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (dmadev_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return RTE_DMADEV_MAX_DEVS;
+}
+
+static struct rte_dmadev*
+dmadev_find(const char *name)
+{
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if ((rte_dmadevices[i].state == RTE_DMADEV_ATTACHED) &&
+		    (!strcmp(name, rte_dmadevices[i].data->dev_name)))
+			return &rte_dmadevices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dmadev_shared_data_prepare(void)
+{
+	const struct rte_memzone *mz;
+
+	if (dmadev_shared_data == NULL) {
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+			/* Allocate shared memory for dmadev data. */
+			mz = rte_memzone_reserve(mz_rte_dmadev_data,
+					 sizeof(*dmadev_shared_data),
+					 rte_socket_id(), 0);
+		} else
+			mz = rte_memzone_lookup(mz_rte_dmadev_data);
+		if (mz == NULL)
+			return -ENOMEM;
+
+		dmadev_shared_data = mz->addr;
+		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+			memset(dmadev_shared_data->data, 0,
+			       sizeof(dmadev_shared_data->data));
+	}
+
+	return 0;
+}
+
+static struct rte_dmadev *
+dmadev_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t dev_id;
+
+	dev = dmadev_find(name);
+	if (dev != NULL) {
+		RTE_DMADEV_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+		return NULL;
+	}
+
+	dev_id = dmadev_find_free_dev();
+	if (dev_id == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR, "Reached maximum number of DMA devices");
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[dev_id];
+	dev->data = &dmadev_shared_data->data[dev_id];
+	dev->data->dev_id = dev_id;
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+
+	return dev;
+}
+
+static struct rte_dmadev *
+dmadev_attach_secondary(const char *name)
+{
+	struct rte_dmadev *dev;
+	uint16_t i;
+
+	if (dmadev_shared_data_prepare() != 0) {
+		RTE_DMADEV_LOG(ERR, "Cannot allocate DMA shared data");
+		return NULL;
+	}
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (!strcmp(dmadev_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == RTE_DMADEV_MAX_DEVS) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dmadevices[i];
+	dev->data = &dmadev_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+struct rte_dmadev *
+rte_dmadev_pmd_allocate(const char *name)
+{
+	struct rte_dmadev *dev;
+
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dmadev_allocate(name);
+	else
+		dev = dmadev_attach_secondary(name);
+
+	if (dev == NULL)
+		return NULL;
+	dev->state = RTE_DMADEV_ATTACHED;
+
+	return dev;
+}
+
+int
+rte_dmadev_pmd_release(struct rte_dmadev *dev)
+{
+	void *dev_private_tmp;
+
+	if (dev == NULL)
+		return -EINVAL;
+
+	if (dev->state == RTE_DMADEV_UNUSED)
+		return 0;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		memset(dev->data, 0, sizeof(struct rte_dmadev_data));
+
+	dev_private_tmp = dev->dev_private;
+	memset(dev, 0, sizeof(struct rte_dmadev));
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->dev_private = dev_private_tmp;
+	dev->state = RTE_DMADEV_UNUSED;
+
+	return 0;
+}
+
+struct rte_dmadev *
+rte_dmadev_get_device_by_name(const char *name)
+{
+	if (dmadev_check_name(name) != 0)
+		return NULL;
+	return dmadev_find(name);
+}
+
+int
+rte_dmadev_get_dev_id(const char *name)
+{
+	struct rte_dmadev *dev = rte_dmadev_get_device_by_name(name);
+	if (dev != NULL)
+		return dev->data->dev_id;
+	return -EINVAL;
+}
+
+bool
+rte_dmadev_is_valid_dev(uint16_t dev_id)
+{
+	return (dev_id < RTE_DMADEV_MAX_DEVS) &&
+		rte_dmadevices[dev_id].state == RTE_DMADEV_ATTACHED;
+}
+
+uint16_t
+rte_dmadev_count(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < RTE_DMADEV_MAX_DEVS; i++) {
+		if (rte_dmadevices[i].state == RTE_DMADEV_ATTACHED)
+			count++;
+	}
+
+	return count;
+}
+
+int
+rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dmadev_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dmadev_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->device = dev->device;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dmadev_configure(uint16_t dev_id, const struct rte_dmadev_conf *dev_conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Failed to get info of device %u", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > info.max_vchans) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u configure too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_SILENT)) {
+		RTE_DMADEV_LOG(ERR, "Device %u doesn't support silent mode", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dmadev_conf));
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf,
+		       sizeof(struct rte_dmadev_conf));
+
+	return ret;
+}
+
+int
+rte_dmadev_start(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+			dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dmadev_stop(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMADEV_LOG(WARNING, "Device %u already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dmadev_close(uint16_t dev_id)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	return (*dev->dev_ops->dev_close)(dev);
+}
+
+int
+rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+		       const struct rte_dmadev_vchan_conf *conf)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Failed to get info of device %u", dev_id);
+		return -EINVAL;
+	}
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u must be configured first",
+			dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= info.nb_vchans) {
+		RTE_DMADEV_LOG(ERR, "Device %u vchan out of range!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMADEV_LOG(ERR, "Device %u direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u doesn't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_MEM_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_MEM)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(info.dev_capa & RTE_DMADEV_CAPA_DEV_TO_DEV)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u don't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < info.min_desc || conf->nb_desc > info.max_desc) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMADEV_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMADEV_PORT_NONE && !src_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMADEV_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMADEV_PORT_NONE && !dst_is_dev)) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dmadev_vchan_conf));
+}
+
+int
+rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
+		     struct rte_dmadev_stats *stats)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dmadev_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dmadev_stats));
+}
+
+int
+rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMADEV_ALL_VCHAN) {
+		RTE_DMADEV_LOG(ERR,
+			"Device %u vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dmadev_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMADEV_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMADEV_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMADEV_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMADEV_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMADEV_CAPA_SVA,         "sva"     },
+		{ RTE_DMADEV_CAPA_SILENT,      "silent"  },
+		{ RTE_DMADEV_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMADEV_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMADEV_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dmadev_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		fprintf(f, " %s", dmadev_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	fprintf(f, "\n");
+}
+
+int
+rte_dmadev_dump(uint16_t dev_id, FILE *f)
+{
+	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	struct rte_dmadev_info info;
+	int ret;
+
+	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dmadev_info_get(dev_id, &info);
+	if (ret != 0) {
+		RTE_DMADEV_LOG(ERR, "Device %u get device info fail", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %u, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	dmadev_dump_capability(f, info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index c8dd0009f5..3cb95fe31a 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -787,9 +787,21 @@ struct rte_dmadev_sge {
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
-		uint32_t length, uint64_t flags);
+		uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
 
 /**
  * @warning
@@ -825,10 +837,23 @@ rte_dmadev_copy(uint16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
 		   struct rte_dmadev_sge *dst, uint16_t nb_src, uint16_t nb_dst,
-		   uint64_t flags);
+		   uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
 
 /**
  * @warning
@@ -860,9 +885,21 @@ rte_dmadev_copy_sg(uint16_t dev_id, uint16_t vchan, struct rte_dmadev_sge *src,
  *   - other values < 0 on failure.
  */
 __rte_experimental
-int
+static inline int
 rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
-		rte_iova_t dst, uint32_t length, uint64_t flags);
+		rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
 
 /**
  * @warning
@@ -882,8 +919,20 @@ rte_dmadev_fill(uint16_t dev_id, uint16_t vchan, uint64_t pattern,
  *   0 on success. Otherwise negative value is returned.
  */
 __rte_experimental
-int
-rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
+static inline int
+rte_dmadev_submit(uint16_t dev_id, uint16_t vchan)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
 
 /**
  * @warning
@@ -909,9 +958,37 @@ rte_dmadev_submit(uint16_t dev_id, uint16_t vchan);
  *   must be less than or equal to the value of nb_cpls.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
-		     uint16_t *last_idx, bool *has_error);
+		     uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as a parameter, then the compiler
+	 *   knows the value is NULL.
+	 * - If the address of a local variable is passed as a parameter, then
+	 *   the compiler knows it is non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
 
 /**
  * @warning
@@ -941,10 +1018,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
  *   status array are also set.
  */
 __rte_experimental
-uint16_t
+static inline uint16_t
 rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
 			    const uint16_t nb_cpls, uint16_t *last_idx,
-			    enum rte_dma_status_code *status);
+			    enum rte_dma_status_code *status)
+{
+	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
 
 #ifdef __cplusplus
 }
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index cbf5e88621..398bdf6391 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -182,4 +182,6 @@ struct rte_dmadev {
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
+extern struct rte_dmadev rte_dmadevices[];
+
 #endif /* _RTE_DMADEV_CORE_H_ */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index d027eeac97..80be592713 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -26,6 +26,7 @@ EXPERIMENTAL {
 INTERNAL {
         global:
 
+	rte_dmadevices;
 	rte_dmadev_get_device_by_name;
 	rte_dmadev_pmd_allocate;
 	rte_dmadev_pmd_release;
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v21 5/7] doc: add DMA device library guide
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-09-07 12:56   ` Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 7/7] app/test: add dmadev API test Chengwen Feng
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-07 12:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds dmadev library guide.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Conor Walsh <conor.walsh@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
---
 MAINTAINERS                          |   1 +
 doc/guides/prog_guide/dmadev.rst     | 125 ++++++++++++
 doc/guides/prog_guide/img/dmadev.svg | 283 +++++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst      |   1 +
 4 files changed, 410 insertions(+)
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg

diff --git a/MAINTAINERS b/MAINTAINERS
index 9885cc56b7..e237e9406b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -499,6 +499,7 @@ F: doc/guides/prog_guide/rawdev.rst
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
 
 
 Memory Pool Drivers
diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000000..e47a164850
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices, as well as a generic DMA API which allows
+DMA devices to be managed and configured, and DMA operations to be
+provisioned on a DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (aka. hardware
+   DMA queues), each of which should be represented by a dmadev.
+ * The dmadev could create multiple virtual DMA channels, each virtual DMA
+   channel representing a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. For example, an application
+   could create virtual DMA channel 0 for the memory-to-memory transfer
+   scenario and virtual DMA channel 1 for the memory-to-device transfer
+   scenario.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+phase of the EAL, which is executed at DPDK initialization, based on their
+PCI BDF (bus/bridge, device, function). Specific physical DMA controllers,
+like other physical devices in DPDK, can be listed using the EAL command line
+options.
+
+The dmadevs are dynamically allocated by using the API
+``rte_dmadev_pmd_allocate`` based on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dmadev_configure(uint16_t dev_id,
+                            const struct rte_dmadev_conf *dev_conf);
+
+The ``rte_dmadev_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
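+
+As a minimal usage sketch (assuming ``dev_id`` identifies a valid device and
+that a single virtual DMA channel suits the application):
+
+.. code-block:: c
+
+   struct rte_dmadev_conf dev_conf = { 0 };
+   int ret;
+
+   /* Request one virtual DMA channel; silent mode stays disabled. */
+   dev_conf.nb_vchans = 1;
+   ret = rte_dmadev_configure(dev_id, &dev_conf);
+   if (ret < 0)
+       return ret;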
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dmadev_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dmadev_vchan_setup(uint16_t dev_id, uint16_t vchan,
+                              const struct rte_dmadev_vchan_conf *conf);
+
+The ``rte_dmadev_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, and the source and
+destination device access port parameters.
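+
+A minimal sketch of setting up virtual channel 0 for memory-to-memory copies
+(the descriptor count is illustrative and must lie between the device's
+``min_desc`` and ``max_desc``):
+
+.. code-block:: c
+
+   struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+   int ret;
+
+   vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+   vchan_conf.nb_desc = 1024;
+   ret = rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf);
+   if (ret < 0)
+       return ret;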
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dmadev_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dmadev_copy`` and ``rte_dmadev_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per operation
+metadata in an application-defined circular ring.
+
+The ``rte_dmadev_submit`` API is used to issue the doorbell to the hardware.
+Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs ``rte_dmadev_completed`` and
+``rte_dmadev_completed_status``, these are used to obtain the results of
+the enqueue requests. ``rte_dmadev_completed`` will return the number of
+successfully completed operations. ``rte_dmadev_completed_status`` will return
+the number of completed operations along with the status of each operation
+(filled into the ``status`` array passed by the user). These two APIs can also
+return the last completed operation's ``ring_idx``, which can help the user
+track operations within their own application-defined rings.
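+
+The following sketch ties these calls together for a single copy (assuming the
+device and virtual channel 0 are configured and started, and that ``src``,
+``dst`` and ``len`` describe a valid transfer):
+
+.. code-block:: c
+
+   uint16_t last_idx;
+   bool has_error;
+   int ring_idx;
+
+   /* Enqueue the copy and ring the doorbell in one call. */
+   ring_idx = rte_dmadev_copy(dev_id, 0, src, dst, len,
+                              RTE_DMA_OP_FLAG_SUBMIT);
+   if (ring_idx < 0)
+       return ring_idx;
+
+   /* Poll for completion; a real application would bound this loop. */
+   while (rte_dmadev_completed(dev_id, 0, 1, &last_idx, &has_error) == 0)
+       ;
+   if (has_error)
+       return -1;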
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000000..157d7eb7dc
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507f46..0abea06b24 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -29,6 +29,7 @@ Programmer's Guide
     regexdev
     rte_security
     rawdev
+    dmadev
     link_bonding_poll_mode_drv_lib
     timer_lib
     hash_lib
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v21 6/7] dma/skeleton: introduce skeleton dmadev driver
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 5/7] doc: add DMA device library guide Chengwen Feng
@ 2021-09-07 12:56   ` Chengwen Feng
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 7/7] app/test: add dmadev API test Chengwen Feng
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-07 12:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The skeleton dmadev driver, along the lines of the rawdev skeleton, is
for showcasing the dmadev library.

The skeleton is designed as a virtual device which is plugged into the
vdev bus on initialization.

Also enable compilation of the dmadev skeleton driver.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   1 +
 drivers/dma/meson.build                |  11 +
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 594 +++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  61 +++
 drivers/dma/skeleton/version.map       |   3 +
 drivers/meson.build                    |   1 +
 7 files changed, 678 insertions(+)
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index e237e9406b..2b505ce71e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -499,6 +499,7 @@ F: doc/guides/prog_guide/rawdev.rst
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: drivers/dma/skeleton/
 F: doc/guides/prog_guide/dmadev.rst
 
 
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000000..0c2c34cd00
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+if is_windows
+    subdir_done()
+endif
+
+drivers = [
+        'skeleton',
+]
+std_deps = ['dmadev']
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000000..27509b1668
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000000..0cc7e2409f
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,594 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances, currently only 1 is supported. */
+static uint16_t skeldma_count;
+
+static int
+skeldma_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	128
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM |
+			     RTE_DMADEV_CAPA_SVA |
+			     RTE_DMADEV_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dmadev *dev, const struct rte_dmadev_conf *conf,
+		  uint32_t conf_sz)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	RTE_SET_USED(conf_sz);
+	return 0;
+}
+
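+/* The copy "engine" of the skeleton: a control thread which drains the
+ * running ring, performs each copy with rte_memcpy and moves finished
+ * descriptors to the completed ring, backing off to sleep when idle.
+ */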
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dmadev *dev = (struct rte_dmadev *)param;
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			/* Guard against the counter wrapping to zero so the
+			 * thread keeps sleeping once the threshold is hit.
+			 */
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, include:
+	 * 1) fflush pending/running/completed ring to empty ring.
+	 * 2) init ring idx to zero.
+	 * 3) init running statistics.
+	 * 4) mark cpucopy task exit_flag to false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Set thread affinity lcore = %u fail!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dmadev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skelteon_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a full ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dmadev *dev)
+{
+	/* The device is already stopped at this point. */
+	vchan_release(dev->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dmadev *dev, uint16_t vchan,
+		    const struct rte_dmadev_vchan_conf *conf,
+		    uint32_t conf_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(conf_sz);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of desc must be power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dmadev *dev, uint16_t vchan,
+		  struct rte_dmadev_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dmadev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->dev_private;
+
+	fprintf(f,
+		"  lcore_id: %d\n"
+		"  socket_id: %d\n"
+		"  desc_empty_ring_count: %u\n"
+		"  desc_pending_ring_count: %u\n"
+		"  desc_running_ring_count: %u\n"
+		"  desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	fprintf(f,
+		"  next_ring_idx: %u\n"
+		"  submitted_count: %" PRIu64 "\n"
+		"  completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
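+/* Flush all pending descriptors to the running ring (the "doorbell"); if a
+ * new descriptor is supplied, enqueue it to the running ring as well.
+ */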
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(struct rte_dmadev *dev, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(struct rte_dmadev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(struct rte_dmadev *dev,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(struct rte_dmadev *dev,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dmadev_ops skeldma_ops = {
+	.dev_info_get  = skeldma_info_get,
+	.dev_configure = skeldma_configure,
+	.dev_start     = skeldma_start,
+	.dev_stop      = skeldma_stop,
+	.dev_close     = skeldma_close,
+
+	.vchan_setup   = skeldma_vchan_setup,
+
+	.stats_get     = skeldma_stats_get,
+	.stats_reset   = skeldma_stats_reset,
+
+	.dev_dump      = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dmadev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	dev = rte_dmadev_pmd_allocate(name);
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev->dev_private = rte_zmalloc_socket("dmadev private",
+					      sizeof(struct skeldma_hw),
+					      RTE_CACHE_LINE_SIZE,
+					      socket_id);
+	if (!dev->dev_private) {
+		SKELDMA_LOG(ERR, "Unable to allocate device private memory");
+		(void)rte_dmadev_pmd_release(dev);
+		return -ENOMEM;
+	}
+
+	dev->copy = skeldma_copy;
+	dev->submit = skeldma_submit;
+	dev->completed = skeldma_completed;
+	dev->completed_status = skeldma_completed_status;
+	dev->dev_ops = &skeldma_ops;
+	dev->device = &vdev->device;
+
+	hw = dev->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	struct rte_dmadev *dev;
+	int ret;
+
+	dev = rte_dmadev_get_device_by_name(name);
+	if (!dev)
+		return -EINVAL;
+
+	ret = rte_dmadev_close(dev->data->dev_id);
+	if (ret)
+		return ret;
+
+	rte_free(dev->dev_private);
+	dev->dev_private = NULL;
+	(void)rte_dmadev_pmd_release(dev);
+
+	return 0;
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multiple process not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_count > 0) {
+		SKELDMA_LOG(ERR, "Multiple instance not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		skeldma_count = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_count = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000000..1cdfdde153
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#ifndef __SKELETON_DMADEV_H__
+#define __SKELETON_DMADEV_H__
+
+#include <pthread.h>
+
+#include <rte_ring.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* __SKELETON_DMADEV_H__ */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000000..c2e0723b4c
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
diff --git a/drivers/meson.build b/drivers/meson.build
index d9e331ec85..a390787d6a 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v21 7/7] app/test: add dmadev API test
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-09-07 12:56   ` Chengwen Feng
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-07 12:56 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds dmadev API tests which are based on the 'dma_skeleton'
vdev. The test cases can be executed using the 'dmadev_autotest' command
in the test framework.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                |   1 +
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  43 +++
 app/test/test_dmadev_api.c | 543 +++++++++++++++++++++++++++++++++++++
 4 files changed, 591 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 2b505ce71e..a19a3cb53c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -500,6 +500,7 @@ DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
 F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
 F: doc/guides/prog_guide/dmadev.rst
 
 
diff --git a/app/test/meson.build b/app/test/meson.build
index a7611686ad..9027eba3a4 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -43,6 +43,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -162,6 +164,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -333,6 +336,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000000..92c47fc041
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+
+/* from test_dmadev_api.c */
+extern int test_dmadev_api(uint16_t dev_id);
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dmadev_get_dev_id(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dmadev_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dmadev(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dmadev);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000000..55046ac485
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,543 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dmadev_api(uint16_t dev_id);
+
+#define SKELDMA_TEST_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static uint16_t test_dev_id;
+static uint16_t invalid_dev_id;
+
+static char *src;
+static char *dst;
+
+static int total;
+static int passed;
+static int failed;
+
+static int
+testsuite_setup(uint16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = RTE_DMADEV_MAX_DEVS;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL) {
+		rte_free(src);
+		src = NULL;
+		return -ENOMEM;
+	}
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	/* Set dmadev log level to critical to suppress unnecessary output
+	 * during API tests.
+	 */
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	src = NULL;
+	rte_free(dst);
+	dst = NULL;
+	/* Ensure the dmadev is stopped. */
+	rte_dmadev_stop(test_dev_id);
+
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dmadev_get_dev_id(void)
+{
+	int ret = rte_dmadev_get_dev_id("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dmadev_is_valid_dev(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dmadev_is_valid_dev(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_count(void)
+{
+	uint16_t count = rte_dmadev_count();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_info_get(void)
+{
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_configure(void)
+{
+	struct rte_dmadev_conf conf = { 0 };
+	struct rte_dmadev_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dmadev_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configured nb_vchans does not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_vchan_setup(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configuration succeeds */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_vchan_setup(test_dev_id, dev_conf.nb_vchans,
+				     &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.src_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMADEV_PORT_PCIE;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_start_stop(void)
+{
+	struct rte_dmadev_vchan_conf vchan_conf = { 0 };
+	struct rte_dmadev_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check that reconfigure and vchan setup fail while device is started */
+	ret = rte_dmadev_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Expected -EBUSY, %d", ret);
+	ret = rte_dmadev_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Expected -EBUSY, %d", ret);
+
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_stats(void)
+{
+	struct rte_dmadev_info dev_info = { 0 };
+	struct rte_dmadev_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dmadev_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dmadev_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dmadev_stats_get(test_dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dmadev_stats_reset(test_dev_id, RTE_DMADEV_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dmadev_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dmadev_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret, i;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check enqueue without submit */
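+	/* Note: casting the malloc'd VA straight to rte_iova_t is OK here
+	 * only because the skeleton driver copies with the CPU; a real HW
+	 * driver would need a proper IOVA, e.g. from rte_malloc_virt2iova().
+	 */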
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "No completion expected before submit");
+
+	/* Check add submit */
+	ret = rte_dmadev_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed(test_dev_id, 0, 1, &last_idx,
+				       &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dmadev_completed_status(void)
+{
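+	/* Initialise status to a non-success value so the asserts below
+	 * catch the case where the driver never writes it back.
+	 */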
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dmadev_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Check completed status again, expecting no new completions */
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Expected no new completions");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dmadev_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+				TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dmadev_completed_status(test_dev_id, 0, 1, &last_idx,
+					      status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dmadev_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dmadev_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup fail!\n");
+		return -1;
+	}
+
+	/* Each testcase that exits successfully must ensure that the test
+	 * dmadev still exists and that it is in the stopped state.
+	 */
+	SKELDMA_TEST_RUN(test_dmadev_get_dev_id);
+	SKELDMA_TEST_RUN(test_dmadev_is_valid_dev);
+	SKELDMA_TEST_RUN(test_dmadev_count);
+	SKELDMA_TEST_RUN(test_dmadev_info_get);
+	SKELDMA_TEST_RUN(test_dmadev_configure);
+	SKELDMA_TEST_RUN(test_dmadev_vchan_setup);
+	SKELDMA_TEST_RUN(test_dmadev_start_stop);
+	SKELDMA_TEST_RUN(test_dmadev_stats);
+	SKELDMA_TEST_RUN(test_dmadev_dump);
+	SKELDMA_TEST_RUN(test_dmadev_completed);
+	SKELDMA_TEST_RUN(test_dmadev_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v20 2/7] dmadev: introduce DMA device library internal header
  2021-09-06 13:35     ` Bruce Richardson
@ 2021-09-07 13:05       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-07 13:05 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

Already fixed in V21, thanks

On 2021/9/6 21:35, Bruce Richardson wrote:
> On Sat, Sep 04, 2021 at 06:10:22PM +0800, Chengwen Feng wrote:
>> This patch introduce DMA device library internal header, which contains
>> internal data types that are used by the DMA devices in order to expose
>> their ops to the class.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
>> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
>> ---
> <snip>
>> +struct rte_dmadev {
>> +	void *dev_private;
>> +	/**< PMD-specific private data.
>> +	 *
>> +	 * - In the primary process, after the dmadev is allocated by
>> +	 * rte_dmadev_pmd_allocate(), the PCI/SoC device probing should
>> +	 * initialize this field, and copy its value to the 'dev_private'
>> +	 * field of 'struct rte_dmadev_data', which is pointed to by the
>> +	 * 'data' field.
>> +	 *
>> +	 * - In the secondary process, the dmadev framework will initialize
>> +	 * this field by copying from the 'dev_private' field of 'struct
>> +	 * rte_dmadev_data', which was initialized by the primary process.
>> +	 *
>> +	 * @note It's the primary process's responsibility to deinitialize
>> +	 * this field after invoking rte_dmadev_pmd_release() in the PCI/SoC
>> +	 * device removal stage.
>> +	 */
>> +	rte_dmadev_copy_t             copy;
>> +	rte_dmadev_copy_sg_t          copy_sg;
>> +	rte_dmadev_fill_t             fill;
>> +	rte_dmadev_submit_t           submit;
>> +	rte_dmadev_completed_t        completed;
>> +	rte_dmadev_completed_status_t completed_status;
>> +	void *reserved_ptr[7]; /**< Reserved for future IO function. */
> 
> This is new in this set, I think. I assume that 7 was chosen so that we
> have the "data" pointer and the "dev_ops" pointers on the second cacheline
> (if 64-byte CLs)? If so, I wonder if we can find a good way to express that
> in the code or in the comments?
> 
> The simplest - and probably as clear as any - is to split this into
> "void *__reserved_cl0" and "void *__reserved_cl1[6]" to show that it is
> split across the two cachelines, with the latter having comment:
> "Reserve space for future IO functions, while keeping data and dev_ops
> pointers on the second cacheline"
> 
> If we don't mind using a slightly different type the magic "6" could be
> changed to a computation:
> char __reserved_cl1[RTE_CACHE_LINE_SIZE - sizeof(void *) * 2];
> 
> However, for simplicity, I think the magic 6 can be kept, and just split
> into reserved_cl0 and reserved_cl1 as I suggest above.
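> 
> A sketch of the layout this gives (assuming 64-byte cachelines, i.e. 8
> pointers per line; data/dev_ops types as in the patch):
> 
> struct rte_dmadev {
> 	void *dev_private;
> 	rte_dmadev_copy_t             copy;
> 	rte_dmadev_copy_sg_t          copy_sg;
> 	rte_dmadev_fill_t             fill;
> 	rte_dmadev_submit_t           submit;
> 	rte_dmadev_completed_t        completed;
> 	rte_dmadev_completed_status_t completed_status;
> 	void *reserved_cl0; /* completes the first cacheline */
> 	/* Reserve space for future IO functions, while keeping data and
> 	 * dev_ops pointers on the second cacheline.
> 	 */
> 	void *reserved_cl1[6];
> 	struct rte_dmadev_data *data;
> 	const struct rte_dmadev_ops *dev_ops;
> } __rte_cache_aligned;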
> 
> /Bruce
> 
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
@ 2021-09-08  9:54     ` Walsh, Conor
  2021-09-09 13:25       ` fengchengwen
  2021-09-15 13:51     ` Kevin Laatz
  1 sibling, 1 reply; 339+ messages in thread
From: Walsh, Conor @ 2021-09-08  9:54 UTC (permalink / raw)
  To: Chengwen Feng, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin, Laatz, Kevin

<snip>

Hi Chengwen,

While testing the IOAT driver I realised that we hadn't implemented the new RTE_DMADEV_ALL_VCHAN
flag for stats. Rather than have every driver that only supports 1 vchan enable support for this
flag, it would probably be better to catch it in the library as shown below.

Thanks,
Conor.

> +int
> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
> +		     struct rte_dmadev_stats *stats)
> +{
> +	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (stats == NULL)
> +		return -EINVAL;
> +	if (vchan >= dev->data->dev_conf.nb_vchans &&
> +	    vchan != RTE_DMADEV_ALL_VCHAN) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range", dev_id, vchan);
> +		return -EINVAL;
> +	}
	if (vchan == RTE_DMADEV_ALL_VCHAN && dev->data->dev_conf.nb_vchans == 1)
		vchan = 0;
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -
> ENOTSUP);
> +	memset(stats, 0, sizeof(struct rte_dmadev_stats));
> +	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
> +					  sizeof(struct rte_dmadev_stats));
> +}
> +
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> +	if (vchan >= dev->data->dev_conf.nb_vchans &&
> +	    vchan != RTE_DMADEV_ALL_VCHAN) {
> +		RTE_DMADEV_LOG(ERR,
> +			"Device %u vchan %u out of range", dev_id, vchan);
> +		return -EINVAL;
> +	}
	if (vchan == RTE_DMADEV_ALL_VCHAN && dev->data->dev_conf.nb_vchans == 1)
		vchan = 0;
> +
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -
> ENOTSUP);
> +	return (*dev->dev_ops->stats_reset)(dev, vchan);
> +}

<snip>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
@ 2021-09-09 10:33     ` Thomas Monjalon
  2021-09-09 11:18       ` Bruce Richardson
                         ` (2 more replies)
  0 siblings, 3 replies; 339+ messages in thread
From: Thomas Monjalon @ 2021-09-09 10:33 UTC (permalink / raw)
  To: bruce.richardson, Chengwen Feng
  Cc: ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev, mb,
	nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

Hi,

I am having a surface look at the API.
I hope we can do better than previous libs.

07/09/2021 14:56, Chengwen Feng:
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
>  F: app/test/test_rawdev.c
>  F: doc/guides/prog_guide/rawdev.rst
>  
> +DMA device API - EXPERIMENTAL
> +M: Chengwen Feng <fengchengwen@huawei.com>
> +F: lib/dmadev/


I think it should be before (preferably) or after eventdev,
but let's keep rawdev as the last one.

Then please apply the same order in other non-alphabetical lists (doc, meson, etc).

> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2021 HiSilicon Limited.
> + * Copyright(c) 2021 Intel Corporation.
> + * Copyright(c) 2021 Marvell International Ltd.
> + * Copyright(c) 2021 SmartShare Systems.
> + */

No need for final dot in copyright lines.

> +#ifndef _RTE_DMADEV_H_
> +#define _RTE_DMADEV_H_

No need for surrounding underscores.

> +
> +/**
> + * @file rte_dmadev.h
> + *
> + * RTE DMA (Direct Memory Access) device APIs.

RTE has no meaning when used in a sentence.
And given it is a DPDK library, you don't really need to specify.
I would also remove the final "s" as the library is one interface.

> + *
> + * The DMA framework is built on the following model:
> + *
> + *     ---------------   ---------------       ---------------
> + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> + *     | channel     |   | channel     |       | channel     |
> + *     ---------------   ---------------       ---------------
> + *            |                |                      |
> + *            ------------------                      |
> + *                     |                              |
> + *               ------------                    ------------
> + *               |  dmadev  |                    |  dmadev  |
> + *               ------------                    ------------
> + *                     |                              |
> + *            ------------------               ------------------
> + *            | HW-DMA-channel |               | HW-DMA-channel |
> + *            ------------------               ------------------
> + *                     |                              |
> + *                     --------------------------------
> + *                                     |
> + *                           ---------------------
> + *                           | HW-DMA-Controller |
> + *                           ---------------------

You don't need hyphens between the words, I think.

> + *
> + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
> + * each HW-DMA-channel should be represented by a dmadev.
> + *
> + * The dmadev could create multiple virtual DMA channels, each virtual DMA
> + * channel represents a different transfer context. The DMA operation request
> + * must be submitted to the virtual DMA channel. e.g. Application could create
> + * virtual DMA channel 0 for memory-to-memory transfer scenario, and create
> + * virtual DMA channel 1 for memory-to-device transfer scenario.

What is the benefit of virtual channels compared to having separate dmadevs
for the same HW channel?

> + *
> + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
> + * PCI/SoC device probing phase performed at EAL initialization time. And could

Not clear what this phase is. Do you mean bus probing?

> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> + * phase.

Again what is this phase?
I think freeing should be done only via the "close" function.

> + *
> + * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
> + * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.

I think it is better to use signed int16_t so you can express "none" in the API,
which can simplify some functions and error management.

> + *
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dmadev_configure()
> + *     - rte_dmadev_vchan_setup()
> + *     - rte_dmadev_start()
> + *
> + * Then, the application can invoke dataplane APIs to process jobs.

You mean "dataplane functions".

> + *
> + * If the application wants to change the configuration (i.e. invoke
> + * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
> + * rte_dmadev_stop() first to stop the device and then do the reconfiguration
> + * before invoking rte_dmadev_start() again. The dataplane APIs should not be

again, APIs -> functions

> + * invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the
> + * rte_dmadev_close() function.
> + *
> + * The dataplane APIs include two parts:
> + * The first part is the submission of operation requests:
> + *     - rte_dmadev_copy()
> + *     - rte_dmadev_copy_sg()
> + *     - rte_dmadev_fill()
> + *     - rte_dmadev_submit()
> + *
> + * These APIs could work with different virtual DMA channels which have
> + * different contexts.
> + *
> + * The first three APIs are used to submit the operation request to the virtual
> + * DMA channel, if the submission is successful, an uint16_t ring_idx is
> + * returned, otherwise a negative number is returned.

unsigned or negative? looks weird.

> + *
> + * The last API was used to issue doorbell to hardware, and also there are flags
> + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the
> + * same work.

I don't understand this sentence.
You mean rte_dmadev_submit function?
Why past tense "was"?
Why having a redundant function?

> + *
> + * The second part is to obtain the result of requests:
> + *     - rte_dmadev_completed()
> + *         - return the number of operation requests completed successfully.
> + *     - rte_dmadev_completed_status()
> + *         - return the number of operation requests completed.
> + *
> + * @note The two completed APIs also support return the last completed
> + * operation's ring_idx.

Not sure why this note is here.

> + * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
> + * application does not invoke the above two completed APIs.
> + *
> + * About the ring_idx which enqueue APIs (e.g. rte_dmadev_copy()
> + * rte_dmadev_fill()) returned, the rules are as follows:

I feel a word is missing above.

> + *     - ring_idx for each virtual DMA channel are independent.
> + *     - For a virtual DMA channel, the ring_idx is monotonically incremented,
> + *       when it reach UINT16_MAX, it wraps back to zero.
> + *     - This ring_idx can be used by applications to track per-operation
> + *       metadata in an application-defined circular ring.
> + *     - The initial ring_idx of a virtual DMA channel is zero, after the
> + *       device is stopped, the ring_idx needs to be reset to zero.
> + *
> + * One example:
> + *     - step-1: start one dmadev
> + *     - step-2: enqueue a copy operation, the ring_idx return is 0
> + *     - step-3: enqueue a copy operation again, the ring_idx return is 1
> + *     - ...
> + *     - step-101: stop the dmadev
> + *     - step-102: start the dmadev
> + *     - step-103: enqueue a copy operation, the ring_idx return is 0
> + *     - ...
> + *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
> + *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
> + *     - ...
> + *
> + * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
> + * rte_dmadev_copy_sg(), rte_dmadev_fill()) defined as rte_iova_t type. The

"is" defined

> + * dmadev supports two types of address: memory address and device address.

Please try to start new sentences on a new line.

> + *
> + * - memory address: the source and destination address of the memory-to-memory
> + * transfer type, or the source address of the memory-to-device transfer type,
> + * or the destination address of the device-to-memory transfer type.
> + * @note If the device support SVA (@see RTE_DMADEV_CAPA_SVA), the memory
> + * address can be any VA address, otherwise it must be an IOVA address.
> + *
> + * - device address: the source and destination address of the device-to-device
> + * transfer type, or the source address of the device-to-memory transfer type,
> + * or the destination address of the memory-to-device transfer type.
> + *
> + * By default, all the functions of the dmadev API exported by a PMD are

What do you mean "by default"? Are there some exceptions?

> + * lock-free functions which assume to not be invoked in parallel on different
> + * logical cores to work on the same target dmadev object.
> + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
> + * parallel invocation because these virtual DMA channels share the same
> + * HW-DMA-channel.
> + *
> + */
[...]
> +#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN

Why not using RTE_DEV_NAME_MAX_LEN directly?
If you keep it, it should be commented, explaining whether it takes '\0'
into account or not.

> +__rte_experimental
> +bool
> +rte_dmadev_is_valid_dev(uint16_t dev_id);

I would suggest dropping the final "_dev" in the function name.


> +uint16_t
> +rte_dmadev_count(void);

It would be safer to name it rte_dmadev_count_avail
in case we need new kind of device count later.

> +
> +/* Enumerates DMA device capabilities. */

You should group them with a doxygen group syntax.
See https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/

> +#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)

Please use RTE_BIT macro (32 or 64).
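For example, with the existing RTE_BIT64 macro from rte_bitops.h:

	#define RTE_DMADEV_CAPA_MEM_TO_MEM	RTE_BIT64(0)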

> +/**< DMA device support memory-to-memory transfer.
> + *
> + * @see struct rte_dmadev_info::dev_capa
> + */

It is preferred to have documentation before the item.

[...]

> +/**
> + * A structure used to retrieve the information of a DMA device.
> + */
> +struct rte_dmadev_info {
> +	struct rte_device *device; /**< Generic Device information. */

Please do not expose this.

> +	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
> +	uint16_t max_vchans;
> +	/**< Maximum number of virtual DMA channels supported. */
> +	uint16_t max_desc;
> +	/**< Maximum allowed number of virtual DMA channel descriptors. */
> +	uint16_t min_desc;
> +	/**< Minimum allowed number of virtual DMA channel descriptors. */
> +	uint16_t max_sges;
> +	/**< Maximum number of source or destination scatter-gather entry
> +	 * supported.
> +	 * If the device does not support COPY_SG capability, this value can be
> +	 * zero.
> +	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
> +	 * parameter nb_src/nb_dst should not exceed this value.
> +	 */
> +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured. */

What about adding NUMA node?

    /* Local NUMA memory ID. -1 if unknown. */
    int16_t numa_node;

> +};

> +int
> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);

In .h files, the return type should not be on a separate line.
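i.e. the same declaration reflowed:

	int rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);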

> +
> +/**
> + * A structure used to configure a DMA device.
> + */

You should mention where it is used with @see.

> +struct rte_dmadev_conf {
> +	uint16_t nb_vchans;
> +	/**< The number of virtual DMA channels to set up for the DMA device.
> +	 * This value cannot be greater than the field 'max_vchans' of struct
> +	 * rte_dmadev_info which get from rte_dmadev_info_get().
> +	 */
> +	bool enable_silent;
> +	/**< Indicates whether to enable silent mode.
> +	 * false-default mode, true-silent mode.
> +	 * This value can be set to true only when the SILENT capability is
> +	 * supported.
> +	 *
> +	 * @see RTE_DMADEV_CAPA_SILENT
> +	 */
> +};
[...]
> +#define RTE_DMADEV_ALL_VCHAN	0xFFFFu

Please do not add this kind of constant without a doxygen comment.


It seems you don't manage the maximum number of devices.
It is fixed in the .c file:
	struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];

Instead I would suggest a more dynamic approach with an init function,
so the application can extend it before calling rte_eal_init.
Please see how it is implemented here:
https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-2-thomas@monjalon.net/

Above patch could also inspire you to start some docs in this patch.


This series adds one file per patch.
Instead it would be better to have groups of features per patch,
meaning the implementation and the driver interface should be
in the same patch.
You can split like this:
	1/ device allocation
	2/ configuration and start/stop
	3/ dataplane functions

I would suggest 2 more patches:
	4/ event notification
see https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-3-thomas@monjalon.net/
	5/ multi-process
see https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-5-thomas@monjalon.net/


Thanks for the work



^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 10:33     ` Thomas Monjalon
@ 2021-09-09 11:18       ` Bruce Richardson
  2021-09-09 11:29         ` Thomas Monjalon
  2021-09-09 13:33       ` fengchengwen
  2021-09-16  3:57       ` fengchengwen
  2 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-09-09 11:18 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Chengwen Feng, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

On Thu, Sep 09, 2021 at 12:33:00PM +0200, Thomas Monjalon wrote:
> Hi,
> 
> I am having a surface look at the API.
> I hope we can do better than previous libs.
> 
A few bits of feedback on your comments and the API below.

/Bruce

> 07/09/2021 14:56, Chengwen Feng:
> > --- a/MAINTAINERS
> > +++ b/MAINTAINERS
> > @@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
> >  F: app/test/test_rawdev.c
> >  F: doc/guides/prog_guide/rawdev.rst
> >  
> > +DMA device API - EXPERIMENTAL
> > +M: Chengwen Feng <fengchengwen@huawei.com>
> > +F: lib/dmadev/
> 
> 
> I think it should be before (preferably) or after eventdev,
> but let's keep rawdev as the last one.
> 
> Then please apply the same order in other non-alphabetical lists (doc, meson, etc).
> 
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2021 HiSilicon Limited.
> > + * Copyright(c) 2021 Intel Corporation.
> > + * Copyright(c) 2021 Marvell International Ltd.
> > + * Copyright(c) 2021 SmartShare Systems.
> > + */
> 
> No need for final dot in copyright lines.
> 
> > +#ifndef _RTE_DMADEV_H_
> > +#define _RTE_DMADEV_H_
> 
> No need for surrounding underscores.
> 
> > +
> > +/**
> > + * @file rte_dmadev.h
> > + *
> > + * RTE DMA (Direct Memory Access) device APIs.
> 
> RTE has no meaning when used in a sentence.
> And given it is a DPDK library, you don't really need to specify.
> I would also remove the final "s" as the library is one interface.
> 
> > + *
> > + * The DMA framework is built on the following model:
> > + *
> > + *     ---------------   ---------------       ---------------
> > + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
> > + *     | channel     |   | channel     |       | channel     |
> > + *     ---------------   ---------------       ---------------
> > + *            |                |                      |
> > + *            ------------------                      |
> > + *                     |                              |
> > + *               ------------                    ------------
> > + *               |  dmadev  |                    |  dmadev  |
> > + *               ------------                    ------------
> > + *                     |                              |
> > + *            ------------------               ------------------
> > + *            | HW-DMA-channel |               | HW-DMA-channel |
> > + *            ------------------               ------------------
> > + *                     |                              |
> > + *                     --------------------------------
> > + *                                     |
> > + *                           ---------------------
> > + *                           | HW-DMA-Controller |
> > + *                           ---------------------
> 
> You don't need hyphens between the words, I think.
> 
> > + *
> > + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
> > + * each HW-DMA-channel should be represented by a dmadev.
> > + *
> > + * The dmadev could create multiple virtual DMA channels, each virtual DMA
> > + * channel represents a different transfer context. The DMA operation request
> > + * must be submitted to the virtual DMA channel. e.g. Application could create
> > + * virtual DMA channel 0 for memory-to-memory transfer scenario, and create
> > + * virtual DMA channel 1 for memory-to-device transfer scenario.
> 
> What is the benefit of virtual channels compared to having separate dmadevs
> for the same HW channel?
> 
> > + *
> > + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
> > + * PCI/SoC device probing phase performed at EAL initialization time. And could
> 
> Not clear what this phase is. Do you mean bus probing?
> 
> > + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> > + * phase.
> 
> Again what is this phase?
> I think freeing should be done only via the "close" function.
> 
> > + *
> > + * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
> > + * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> 
> I think it is better to use signed int16_t so you can express "none" in the API,
> which can simplify some functions and error management.
> 
> > + *
> > + * The functions exported by the dmadev API to setup a device designated by its
> > + * device identifier must be invoked in the following order:
> > + *     - rte_dmadev_configure()
> > + *     - rte_dmadev_vchan_setup()
> > + *     - rte_dmadev_start()
> > + *
> > + * Then, the application can invoke dataplane APIs to process jobs.
> 
> You mean "dataplane functions".
> 
> > + *
> > + * If the application wants to change the configuration (i.e. invoke
> > + * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
> > + * rte_dmadev_stop() first to stop the device and then do the reconfiguration
> > + * before invoking rte_dmadev_start() again. The dataplane APIs should not be
> 
> again, APIs -> functions
> 
> > + * invoked when the device is stopped.
> > + *
> > + * Finally, an application can close a dmadev by invoking the
> > + * rte_dmadev_close() function.
> > + *
> > + * The dataplane APIs include two parts:
> > + * The first part is the submission of operation requests:
> > + *     - rte_dmadev_copy()
> > + *     - rte_dmadev_copy_sg()
> > + *     - rte_dmadev_fill()
> > + *     - rte_dmadev_submit()
> > + *
> > + * These APIs could work with different virtual DMA channels which have
> > + * different contexts.
> > + *
> > + * The first three APIs are used to submit the operation request to the virtual
> > + * DMA channel, if the submission is successful, an uint16_t ring_idx is
> > + * returned, otherwise a negative number is returned.
> 
> unsigned or negative? looks weird.
> 

Maybe, but it works well. We could perhaps rephrase to make it less weird
though:
"if the submission is successful, a positive ring_idx <= UINT16_MAX is
 returned, otherwise a negative number is returned."

> > + *
> > + * The last API was used to issue doorbell to hardware, and also there are flags
> > + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the
> > + * same work.
> 
> I don't understand this sentence.
> You mean rte_dmadev_submit function?
> Why past tense "was"?
> Why having a redundant function?
> 

Just because there are two ways to do something does not mean that one of
them is redundant, as both may be more suitable for different situations.
When enqueuing a set of jobs to the device, having a separate submit
outside a loop makes for clearer code than having a check for the last
iteration inside the loop to set a special submit flag.  However, for cases
where one item alone is to be submitted or there is a small set of jobs to
be submitted sequentially, having a submit flag provides a lower-overhead
way of doing the submission while still keeping the code clean.
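
For illustration, a minimal sketch of the two styles (function names as in
this patch set; assuming vchan 0 and IOVA values already computed):

	/* batch: enqueue N jobs, one doorbell at the end */
	for (i = 0; i < n; i++)
		rte_dmadev_copy(dev_id, 0, src[i], dst[i], len[i], 0);
	rte_dmadev_submit(dev_id, 0);

	/* single job: doorbell via the flag, no extra call */
	rte_dmadev_copy(dev_id, 0, src0, dst0, len0, RTE_DMA_OP_FLAG_SUBMIT);

(Error handling omitted for brevity.)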

> > + *
> > + * The second part is to obtain the result of requests:
> > + *     - rte_dmadev_completed()
> > + *         - return the number of operation requests completed successfully.
> > + *     - rte_dmadev_completed_status()
> > + *         - return the number of operation requests completed.
> > + *
> > + * @note The two completed APIs also support return the last completed
> > + * operation's ring_idx.
> 
> Not sure why this note is here.
> 
> > + * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
> > + * application does not invoke the above two completed APIs.
> > + *
> > + * About the ring_idx which enqueue APIs (e.g. rte_dmadev_copy()
> > + * rte_dmadev_fill()) returned, the rules are as follows:
> 
> I feel a word is missing above.
> 
> > + *     - ring_idx for each virtual DMA channel are independent.
> > + *     - For a virtual DMA channel, the ring_idx is monotonically incremented,
> > + *       when it reach UINT16_MAX, it wraps back to zero.
> > + *     - This ring_idx can be used by applications to track per-operation
> > + *       metadata in an application-defined circular ring.
> > + *     - The initial ring_idx of a virtual DMA channel is zero, after the
> > + *       device is stopped, the ring_idx needs to be reset to zero.
> > + *
> > + * One example:
> > + *     - step-1: start one dmadev
> > + *     - step-2: enqueue a copy operation, the ring_idx return is 0
> > + *     - step-3: enqueue a copy operation again, the ring_idx return is 1
> > + *     - ...
> > + *     - step-101: stop the dmadev
> > + *     - step-102: start the dmadev
> > + *     - step-103: enqueue a copy operation, the ring_idx return is 0
> > + *     - ...
> > + *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
> > + *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
> > + *     - ...
> > + *
> > + * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
> > + * rte_dmadev_copy_sg(), rte_dmadev_fill()) defined as rte_iova_t type. The
> 
> "is" defined
> 
> > + * dmadev supports two types of address: memory address and device address.
> 
> Please try to start new sentences on a new line.
> 
> > + *
> > + * - memory address: the source and destination address of the memory-to-memory
> > + * transfer type, or the source address of the memory-to-device transfer type,
> > + * or the destination address of the device-to-memory transfer type.
> > + * @note If the device support SVA (@see RTE_DMADEV_CAPA_SVA), the memory
> > + * address can be any VA address, otherwise it must be an IOVA address.
> > + *
> > + * - device address: the source and destination address of the device-to-device
> > + * transfer type, or the source address of the device-to-memory transfer type,
> > + * or the destination address of the memory-to-device transfer type.
> > + *
> > + * By default, all the functions of the dmadev API exported by a PMD are
> 
> What do you mean "by default"? Are there some exceptions?
> 
> > + * lock-free functions which assume to not be invoked in parallel on different
> > + * logical cores to work on the same target dmadev object.
> > + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
> > + * parallel invocation because these virtual DMA channels share the same
> > + * HW-DMA-channel.
> > + *
> > + */
> [...]
> > +#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
> 
> Why not using RTE_DEV_NAME_MAX_LEN directly?
> If you keep it, it should be commented, explaining whether it takes '\0'
> into account or not.
> 
> > +__rte_experimental
> > +bool
> > +rte_dmadev_is_valid_dev(uint16_t dev_id);
> 
> I would suggest dropping the final "_dev" in the function name.
> 

The alternative, which I would support, is replacing "rte_dmadev" with
"rte_dma" across the API. This would then become "rte_dma_is_valid_dev"
which is clearer, since the dev is not part of the standard prefix. It also
would fit in with a possible future function of "rte_dma_is_valid_vchan"
for instance.

> 
> > +uint16_t
> > +rte_dmadev_count(void);
> 
> It would be safer to name it rte_dmadev_count_avail
> in case we need new kind of device count later.
> 

If we change "dmadev" to "dma" this could then be
"rte_dma_count_avail_dev".

> > +
> > +/* Enumerates DMA device capabilities. */
> 
> You should group them with a doxygen group syntax.
> See https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/
> 
> > +#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
> 
> Please use RTE_BIT macro (32 or 64).
> 
> > +/**< DMA device support memory-to-memory transfer.
> > + *
> > + * @see struct rte_dmadev_info::dev_capa
> > + */
> 
> It is preferred to have documentation before the item.
> 
> [...]
> 
> > +/**
> > + * A structure used to retrieve the information of a DMA device.
> > + */
> > +struct rte_dmadev_info {
> > +	struct rte_device *device; /**< Generic Device information. */
> 
> Please do not expose this.
> 
> > +	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
> > +	uint16_t max_vchans;
> > +	/**< Maximum number of virtual DMA channels supported. */
> > +	uint16_t max_desc;
> > +	/**< Maximum allowed number of virtual DMA channel descriptors. */
> > +	uint16_t min_desc;
> > +	/**< Minimum allowed number of virtual DMA channel descriptors. */
> > +	uint16_t max_sges;
> > +	/**< Maximum number of source or destination scatter-gather entry
> > +	 * supported.
> > +	 * If the device does not support COPY_SG capability, this value can be
> > +	 * zero.
> > +	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
> > +	 * parameter nb_src/nb_dst should not exceed this value.
> > +	 */
> > +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured. */
> 
> What about adding NUMA node?
> 
>     /* Local NUMA memory ID. -1 if unknown. */
>     int16_t numa_node;
> 

That was omitted as it could be obtained through the device structure. If the
device is removed, we need to ensure all fields needed from it, such as the
numa node, are made available here.

> > +};
> 
> > +int
> > +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
>
<snip> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 11:18       ` Bruce Richardson
@ 2021-09-09 11:29         ` Thomas Monjalon
  2021-09-09 12:45           ` Bruce Richardson
  0 siblings, 1 reply; 339+ messages in thread
From: Thomas Monjalon @ 2021-09-09 11:29 UTC (permalink / raw)
  To: Bruce Richardson, Chengwen Feng
  Cc: ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev, mb,
	nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

09/09/2021 13:18, Bruce Richardson:
> On Thu, Sep 09, 2021 at 12:33:00PM +0200, Thomas Monjalon wrote:
> > 07/09/2021 14:56, Chengwen Feng:
> > > + * The first three APIs are used to submit the operation request to the virtual
> > > + * DMA channel, if the submission is successful, an uint16_t ring_idx is
> > > + * returned, otherwise a negative number is returned.
> > 
> > unsigned or negative? looks weird.
> 
> Maybe, but it works well. We could perhaps rephrase to make it less weird
> though:
> "if the submission is successful, a positive ring_idx <= UINT16_MAX is
>  returned, otherwise a negative number is returned."

I am advocating for int16_t,
it makes a lot of things simpler.

> > > + *
> > > + * The last API was used to issue doorbell to hardware, and also there are flags
> > > + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the
> > > + * same work.
> > 
> > I don't understand this sentence.
> > You mean rte_dmadev_submit function?
> > Why past tense "was"?
> > Why having a redundant function?
> > 
> 
> Just because there are two ways to do something does not mean that one of
> them is redundant, as both may be more suitable for different situations.

I agree.

> When enqueuing a set of jobs to the device, having a separate submit
> outside a loop makes for clearer code than having a check for the last
> iteration inside the loop to set a special submit flag.  However, for cases
> where one item alone is to be submitted or there is a small set of jobs to
> be submitted sequentially, having a submit flag provides a lower-overhead
> way of doing the submission while still keeping the code clean.

This kind of explanation may be missing in doxygen?

> > > +bool
> > > +rte_dmadev_is_valid_dev(uint16_t dev_id);
> > 
> > I would suggest dropping the final "_dev" in the function name.
> > 
> 
> The alternative, which I would support, is replacing "rte_dmadev" with
> "rte_dma" across the API. This would then become "rte_dma_is_valid_dev"
> which is clearer, since the dev is not part of the standard prefix. It also
> would fit in with a possible future function of "rte_dma_is_valid_vchan"
> for instance.

Yes
The question is whether it would make sense to reserve the rte_dma_ prefix
for some DMA functions which would be outside of the dmadev lib?
If you think that all DMA functions will be in dmadev,
then yes we can shorten the prefix to rte_dma_.

> > > +uint16_t
> > > +rte_dmadev_count(void);
> > 
> > It would be safer to name it rte_dmadev_count_avail
> > in case we need new kind of device count later.
> > 
> 
> If we change "dmadev" to "dma" this could then be
> "rte_dma_count_avail_dev".

Yes

> > > +/**
> > > + * A structure used to retrieve the information of a DMA device.
> > > + */
> > > +struct rte_dmadev_info {
> > > +	struct rte_device *device; /**< Generic Device information. */
> > 
> > Please do not expose this.
> > 
> > > +	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
> > > +	uint16_t max_vchans;
> > > +	/**< Maximum number of virtual DMA channels supported. */
> > > +	uint16_t max_desc;
> > > +	/**< Maximum allowed number of virtual DMA channel descriptors. */
> > > +	uint16_t min_desc;
> > > +	/**< Minimum allowed number of virtual DMA channel descriptors. */
> > > +	uint16_t max_sges;
> > > +	/**< Maximum number of source or destination scatter-gather entry
> > > +	 * supported.
> > > +	 * If the device does not support COPY_SG capability, this value can be
> > > +	 * zero.
> > > +	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
> > > +	 * parameter nb_src/nb_dst should not exceed this value.
> > > +	 */
> > > +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured. */
> > 
> > What about adding NUMA node?
> > 
> >     /* Local NUMA memory ID. -1 if unknown. */
> >     int16_t numa_node;
> > 
> 
> > That was omitted as it could be obtained through the device structure. If the
> > device is removed, we need to ensure all fields needed from it, such as the
> > numa node, are made available here.

Ah yes, forgot about rte_device :)
Yes please remove rte_device from this struct.




^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 11:29         ` Thomas Monjalon
@ 2021-09-09 12:45           ` Bruce Richardson
  2021-09-09 13:54             ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-09-09 12:45 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: Chengwen Feng, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

On Thu, Sep 09, 2021 at 01:29:33PM +0200, Thomas Monjalon wrote:
> 09/09/2021 13:18, Bruce Richardson:
> > On Thu, Sep 09, 2021 at 12:33:00PM +0200, Thomas Monjalon wrote:
> > > 07/09/2021 14:56, Chengwen Feng:
> > > > + * The first three APIs are used to submit the operation request to the virtual
> > > > + * DMA channel, if the submission is successful, an uint16_t ring_idx is
> > > > + * returned, otherwise a negative number is returned.
> > > 
> > > unsigned or negative? looks weird.
> > 
> > Maybe, but it works well. We could perhaps rephrase to make it less weird
> > though:
> > "if the submission is successful, a positive ring_idx <= UINT16_MAX is
> >  returned, otherwise a negative number is returned."
> 
> I am advocating for int16_t,
> it makes a lot of things simpler.
> 

No, it doesn't work as you can't have wrap-around of the IDs once you use
signed values - and that impacts both the end app and the internals of the
drivers. Let's keep it as-is, otherwise it will have massive impacts -
including potential perf impacts.
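
To illustrate: with uint16_t indexes an app can always compute the number of
newly completed jobs as a modular difference, even across the 65535 -> 0
wrap:

	uint16_t count = (uint16_t)(last_idx - prev_idx); /* mod 2^16 */

With a signed int16_t, that arithmetic (and the ring indexing inside the
drivers) would need special-casing.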

> > > > + *
> > > > + * The last API was used to issue doorbell to hardware, and also there are flags
> > > > + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the
> > > > + * same work.
> > > 
> > > I don't understand this sentence.
> > > You mean rte_dmadev_submit function?
> > > Why past tense "was"?
> > > Why having a redundant function?
> > > 
> > 
> > Just because there are two ways to do something does not mean that one of
> > them is redundant, as both may be more suitable for different situations.
> 
> I agree.
> 
> > When enqueuing a set of jobs to the device, having a separate submit
> > outside a loop makes for clearer code than having a check for the last
> > iteration inside the loop to set a special submit flag.  However, for cases
> > where one item alone is to be submitted or there is a small set of jobs to
> > be submitted sequentially, having a submit flag provides a lower-overhead
> > way of doing the submission while still keeping the code clean.
> 
> This kind of explanation may be missing in doxygen?
> 

It can be added, sure.

> > > > +bool
> > > > +rte_dmadev_is_valid_dev(uint16_t dev_id);
> > > 
> > > I would suggest dropping the final "_dev" in the function name.
> > > 
> > 
> > The alternative, which I would support, is replacing "rte_dmadev" with
> > "rte_dma" across the API. This would then become "rte_dma_is_valid_dev"
> > which is clearer, since the dev is not part of the standard prefix. It also
> > would fit in with a possible future function of "rte_dma_is_valid_vchan"
> > for instance.
> 
> Yes
> The question is whether it would make sense to reserve the rte_dma_ prefix
> for some DMA functions which would be outside of the dmadev lib?
> If you think that all DMA functions will be in dmadev,
> then yes we can shorten the prefix to rte_dma_.
> 

Well, any DPDK dma functions which are not in the dma library should have the
prefix of the library they are in, e.g. rte_eal_dma_*, rte_pci_dma_*.
Therefore, I don't think name conflicts should be an issue, and I like
having less typing to do in function names (and I believe Morten was
strongly proposing this previously too)

> > > > +uint16_t
> > > > +rte_dmadev_count(void);
> > > 
> > > It would be safer to name it rte_dmadev_count_avail
> > > in case we need new kind of device count later.
> > > 
> > 
> > If we change "dmadev" to "dma" this could then be
> > "rte_dma_count_avail_dev".
> 
> Yes
> 
> > > > +/**
> > > > + * A structure used to retrieve the information of a DMA device.
> > > > + */
> > > > +struct rte_dmadev_info {
> > > > +	struct rte_device *device; /**< Generic Device information. */
> > > 
> > > Please do not expose this.
> > > 
> > > > +	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
> > > > +	uint16_t max_vchans;
> > > > +	/**< Maximum number of virtual DMA channels supported. */
> > > > +	uint16_t max_desc;
> > > > +	/**< Maximum allowed number of virtual DMA channel descriptors. */
> > > > +	uint16_t min_desc;
> > > > +	/**< Minimum allowed number of virtual DMA channel descriptors. */
> > > > +	uint16_t max_sges;
> > > > +	/**< Maximum number of source or destination scatter-gather entry
> > > > +	 * supported.
> > > > +	 * If the device does not support COPY_SG capability, this value can be
> > > > +	 * zero.
> > > > +	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
> > > > +	 * parameter nb_src/nb_dst should not exceed this value.
> > > > +	 */
> > > > +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured. */
> > > 
> > > What about adding NUMA node?
> > > 
> > >     /* Local NUMA memory ID. -1 if unknown. */
> > >     int16_t numa_node;
> > > 
> > 
> > That was omitted as it could be got through the device structure. If device
> > is removed, we need to ensure all fields needed from device, such as numa
> > node, are made available here.
> 
> Ah yes, forgot about rte_device :)
> Yes please remove rte_device from this struct.
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation
  2021-09-08  9:54     ` Walsh, Conor
@ 2021-09-09 13:25       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-09 13:25 UTC (permalink / raw)
  To: Walsh, Conor, thomas, Yigit, Ferruh, Richardson,  Bruce, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor, Ananyev,
	Konstantin, Laatz, Kevin

For a dmadev which only supports one vchan, it's OK to ignore the vchan parameter.
I don't think that logic needs to be added.

On 2021/9/8 17:54, Walsh, Conor wrote:
> <snip>
> 
> Hi Chengwen,
> 
> While testing the IOAT driver I realised that we hadn't implemented the new RTE_DMADEV_ALL_VCHAN
> flag for stats. Rather than having every driver that only supports one vchan enable support for this
> flag, it would probably be better to catch it in the library as shown below.
> 
> Thanks,
> Conor.
> 
>> +int
>> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
>> +		     struct rte_dmadev_stats *stats)
>> +{
>> +	const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +
>> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
>> +	if (stats == NULL)
>> +		return -EINVAL;
>> +	if (vchan >= dev->data->dev_conf.nb_vchans &&
>> +	    vchan != RTE_DMADEV_ALL_VCHAN) {
>> +		RTE_DMADEV_LOG(ERR,
>> +			"Device %u vchan %u out of range", dev_id, vchan);
>> +		return -EINVAL;
>> +	}
> 	if (vchan == RTE_DMADEV_ALL_VCHAN && dev->data->dev_conf.nb_vchans == 1)
> 		vchan = 0;
>> +
>> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
>> +	memset(stats, 0, sizeof(struct rte_dmadev_stats));
>> +	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
>> +					  sizeof(struct rte_dmadev_stats));
>> +}
>> +
>> +int
>> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
>> +{
>> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>> +
>> +	RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
>> +	if (vchan >= dev->data->dev_conf.nb_vchans &&
>> +	    vchan != RTE_DMADEV_ALL_VCHAN) {
>> +		RTE_DMADEV_LOG(ERR,
>> +			"Device %u vchan %u out of range", dev_id, vchan);
>> +		return -EINVAL;
>> +	}
> 	if (vchan == RTE_DMADEV_ALL_VCHAN && dev->data->dev_conf.nb_vchans == 1)
> 		vchan = 0;
>> +
>> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
>> +	return (*dev->dev_ops->stats_reset)(dev, vchan);
>> +}
> 
> <snip>
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 10:33     ` Thomas Monjalon
  2021-09-09 11:18       ` Bruce Richardson
@ 2021-09-09 13:33       ` fengchengwen
  2021-09-09 14:19         ` Thomas Monjalon
  2021-09-16  3:57       ` fengchengwen
  2 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-09-09 13:33 UTC (permalink / raw)
  To: Thomas Monjalon, bruce.richardson
  Cc: ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev, mb,
	nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

Thanks for reviewing; mostly OK, with a few replies inlined.

On 2021/9/9 18:33, Thomas Monjalon wrote:
> Hi,
> 
> I am having a surface look at the API.
> I hope we can do better than previous libs.
> 
> 07/09/2021 14:56, Chengwen Feng:
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
>>  F: app/test/test_rawdev.c
>>  F: doc/guides/prog_guide/rawdev.rst
>>  
>> +DMA device API - EXPERIMENTAL
>> +M: Chengwen Feng <fengchengwen@huawei.com>
>> +F: lib/dmadev/
> 
> 
> I think it should before (preferably) or after eventdev,
> but let's keep rawdev as the last one.
> 
> Then please apply the same order in other non-alphabetical lists (doc, meson, etc).
> 
>> +/* SPDX-License-Identifier: BSD-3-Clause
>> + * Copyright(c) 2021 HiSilicon Limited.
>> + * Copyright(c) 2021 Intel Corporation.
>> + * Copyright(c) 2021 Marvell International Ltd.
>> + * Copyright(c) 2021 SmartShare Systems.
>> + */
> 
> No need for final dot in copyright lines.
> 
>> +#ifndef _RTE_DMADEV_H_
>> +#define _RTE_DMADEV_H_
> 
> No need for surrounding underscores.
> 
>> +
>> +/**
>> + * @file rte_dmadev.h
>> + *
>> + * RTE DMA (Direct Memory Access) device APIs.
> 
> RTE has no meaning when used in a sentence.
> And given it is a DPDK library, you don't really need to specify.
> I would also remove the final "s" as the library is one interface.
> 
>> + *
>> + * The DMA framework is built on the following model:
>> + *
>> + *     ---------------   ---------------       ---------------
>> + *     | virtual DMA |   | virtual DMA |       | virtual DMA |
>> + *     | channel     |   | channel     |       | channel     |
>> + *     ---------------   ---------------       ---------------
>> + *            |                |                      |
>> + *            ------------------                      |
>> + *                     |                              |
>> + *               ------------                    ------------
>> + *               |  dmadev  |                    |  dmadev  |
>> + *               ------------                    ------------
>> + *                     |                              |
>> + *            ------------------               ------------------
>> + *            | HW-DMA-channel |               | HW-DMA-channel |
>> + *            ------------------               ------------------
>> + *                     |                              |
>> + *                     --------------------------------
>> + *                                     |
>> + *                           ---------------------
>> + *                           | HW-DMA-Controller |
>> + *                           ---------------------
> 
> You don't hyphens between the words I think.
> 
>> + *
>> + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
>> + * each HW-DMA-channel should be represented by a dmadev.
>> + *
>> + * The dmadev could create multiple virtual DMA channels, each virtual DMA
>> + * channel represents a different transfer context. The DMA operation request
>> + * must be submitted to the virtual DMA channel. e.g. Application could create
>> + * virtual DMA channel 0 for memory-to-memory transfer scenario, and create
>> + * virtual DMA channel 1 for memory-to-device transfer scenario.
> 
> What is the benefit of virtual channels compared to have separate dmadevs
> for the same HW channel?

This design is from the previous discussion [1]. If a dmadev is created for each
virtual channel, there are many associations between the dmadevs. For example,
channel operations of some devices need to interact with the kernel, and the
corresponding kernel operation handles would need to be shared among multiple
dmadevs. It's going to get more complicated.

[1] https://lore.kernel.org/dpdk-dev/c4a0ee30-f7b8-f8a1-463c-8eedaec82aea@huawei.com/
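
To illustrate the model, a rough usage sketch (the setup order is from the
doxygen above; the 'rte_dmadev_vchan_conf' struct and the direction constants
here are illustrative assumptions, not the final API):

	struct rte_dmadev_conf dev_conf = { .nb_vchans = 2 };
	struct rte_dmadev_vchan_conf vchan_conf;

	rte_dmadev_configure(dev_id, &dev_conf);

	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;  /* vchan 0 */
	rte_dmadev_vchan_setup(dev_id, 0, &vchan_conf);
	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;  /* vchan 1 */
	rte_dmadev_vchan_setup(dev_id, 1, &vchan_conf);

	rte_dmadev_start(dev_id);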

> 
>> + *
>> + * The dmadev are dynamically allocated by rte_dmadev_pmd_allocate() during the
>> + * PCI/SoC device probing phase performed at EAL initialization time. And could
> 
> Not clear what is this phase. Do you mean bus probing?

Yes, it's bus probing.

> 
>> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
>> + * phase.
> 
> Again what is this phase?
> I think freeing should be done only via the "close" function.

OK
The allocate/release logic will be reworked with reference to rte_eth_dev_pci_generic_probe.

> 
>> + *
>> + * This framework uses 'uint16_t dev_id' as the device identifier of a dmadev,
>> + * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
> 
> I think it is better to use signed int16_t so you can express "none" in the API,
> which can simplify some functions and error management.
> 
>> + *
>> + * The functions exported by the dmadev API to setup a device designated by its
>> + * device identifier must be invoked in the following order:
>> + *     - rte_dmadev_configure()
>> + *     - rte_dmadev_vchan_setup()
>> + *     - rte_dmadev_start()
>> + *
>> + * Then, the application can invoke dataplane APIs to process jobs.
> 
> You mean "dataplane functions".
> 
>> + *
>> + * If the application wants to change the configuration (i.e. invoke
>> + * rte_dmadev_configure() or rte_dmadev_vchan_setup()), it must invoke
>> + * rte_dmadev_stop() first to stop the device and then do the reconfiguration
>> + * before invoking rte_dmadev_start() again. The dataplane APIs should not be
> 
> again, APIs -> functions
> 
>> + * invoked when the device is stopped.
>> + *
>> + * Finally, an application can close a dmadev by invoking the
>> + * rte_dmadev_close() function.
>> + *
>> + * The dataplane APIs include two parts:
>> + * The first part is the submission of operation requests:
>> + *     - rte_dmadev_copy()
>> + *     - rte_dmadev_copy_sg()
>> + *     - rte_dmadev_fill()
>> + *     - rte_dmadev_submit()
>> + *
>> + * These APIs could work with different virtual DMA channels which have
>> + * different contexts.
>> + *
>> + * The first three APIs are used to submit the operation request to the virtual
>> + * DMA channel, if the submission is successful, an uint16_t ring_idx is
>> + * returned, otherwise a negative number is returned.
> 
> unsigned or negative? looks weird.
> 
>> + *
>> + * The last API was used to issue doorbell to hardware, and also there are flags
>> + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the
>> + * same work.
> 
> I don't understand this sentence.
> You mean rte_dmadev_submit function?
> Why past tense "was"?
> Why having a redundant function?
> 
>> + *
>> + * The second part is to obtain the result of requests:
>> + *     - rte_dmadev_completed()
>> + *         - return the number of operation requests completed successfully.
>> + *     - rte_dmadev_completed_status()
>> + *         - return the number of operation requests completed.
>> + *
>> + * @note The two completed APIs also support return the last completed
>> + * operation's ring_idx.
> 
> Not sure why this note here.
> 
>> + * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
>> + * application does not invoke the above two completed APIs.
>> + *
>> + * About the ring_idx which enqueue APIs (e.g. rte_dmadev_copy()
>> + * rte_dmadev_fill()) returned, the rules are as follows:
> 
> I feel a word is missing above.

Can you point it out specifically?
PS: I checked it with https://www.nounplus.net/grammarcheck/, and it suggests
changing 'enqueue' to 'enqueues' or 'enqueued'.

> 
>> + *     - ring_idx for each virtual DMA channel are independent.
>> + *     - For a virtual DMA channel, the ring_idx is monotonically incremented,
>> + *       when it reach UINT16_MAX, it wraps back to zero.
>> + *     - This ring_idx can be used by applications to track per-operation
>> + *       metadata in an application-defined circular ring.
>> + *     - The initial ring_idx of a virtual DMA channel is zero, after the
>> + *       device is stopped, the ring_idx needs to be reset to zero.
>> + *
>> + * One example:
>> + *     - step-1: start one dmadev
>> + *     - step-2: enqueue a copy operation, the ring_idx return is 0
>> + *     - step-3: enqueue a copy operation again, the ring_idx return is 1
>> + *     - ...
>> + *     - step-101: stop the dmadev
>> + *     - step-102: start the dmadev
>> + *     - step-103: enqueue a copy operation, the ring_idx return is 0
>> + *     - ...
>> + *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
>> + *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
>> + *     - ...
>> + *
>> + * The DMA operation address used in enqueue APIs (i.e. rte_dmadev_copy(),
>> + * rte_dmadev_copy_sg(), rte_dmadev_fill()) defined as rte_iova_t type. The
> 
> "is" defined
> 
>> + * dmadev supports two types of address: memory address and device address.
> 
> Please try to start new sentences on a new line.
> 
>> + *
>> + * - memory address: the source and destination address of the memory-to-memory
>> + * transfer type, or the source address of the memory-to-device transfer type,
>> + * or the destination address of the device-to-memory transfer type.
>> + * @note If the device support SVA (@see RTE_DMADEV_CAPA_SVA), the memory
>> + * address can be any VA address, otherwise it must be an IOVA address.
>> + *
>> + * - device address: the source and destination address of the device-to-device
>> + * transfer type, or the source address of the device-to-memory transfer type,
>> + * or the destination address of the memory-to-device transfer type.
>> + *
>> + * By default, all the functions of the dmadev API exported by a PMD are
> 
> What do you mean "by default"? Is there some exceptions?
> 
>> + * lock-free functions which assume to not be invoked in parallel on different
>> + * logical cores to work on the same target dmadev object.
>> + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
>> + * parallel invocation because these virtual DMA channels share the same
>> + * HW-DMA-channel.
>> + *
>> + */
> [...]
>> +#define RTE_DMADEV_NAME_MAX_LEN	RTE_DEV_NAME_MAX_LEN
> 
> Why not using RTE_DEV_NAME_MAX_LEN directly?
> If you keep it, it should be commented, explaining whether it takes '\0'
> into account or not.
> 
>> +__rte_experimental
>> +bool
>> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> 
> I would suggest dropping the final "_dev" in the function name.
> 
> 
>> +uint16_t
>> +rte_dmadev_count(void);
> 
> It would be safer to name it rte_dmadev_count_avail
> in case we need new kind of device count later.
> 
>> +
>> +/* Enumerates DMA device capabilities. */
> 
> You should group them with a doxygen group syntax.
> See https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/
> 
>> +#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
> 
> Please use RTE_BIT macro (32 or 64).
> 
>> +/**< DMA device support memory-to-memory transfer.
>> + *
>> + * @see struct rte_dmadev_info::dev_capa
>> + */
> 
> It is preferred to have documentation before the item.

Is this rule particularly strong?
I use the postfix comment style for all the doxygen comments. I think it's better
to maintain a unified style.

> 
> [...]
> 
>> +/**
>> + * A structure used to retrieve the information of a DMA device.
>> + */
>> +struct rte_dmadev_info {
>> +	struct rte_device *device; /**< Generic Device information. */
> 
> Please do not expose this.
> 
>> +	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
>> +	uint16_t max_vchans;
>> +	/**< Maximum number of virtual DMA channels supported. */
>> +	uint16_t max_desc;
>> +	/**< Maximum allowed number of virtual DMA channel descriptors. */
>> +	uint16_t min_desc;
>> +	/**< Minimum allowed number of virtual DMA channel descriptors. */
>> +	uint16_t max_sges;
>> +	/**< Maximum number of source or destination scatter-gather entry
>> +	 * supported.
>> +	 * If the device does not support COPY_SG capability, this value can be
>> +	 * zero.
>> +	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
>> +	 * parameter nb_src/nb_dst should not exceed this value.
>> +	 */
>> +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured. */
> 
> What about adding NUMA node?
> 
>     /* Local NUMA memory ID. -1 if unknown. */
>     int16_t numa_node;
> 
>> +};
> 
>> +int
>> +rte_dmadev_info_get(uint16_t dev_id, struct rte_dmadev_info *dev_info);
> 
> In .h files, the return type should not be on a separate line.
> 
>> +
>> +/**
>> + * A structure used to configure a DMA device.
>> + */
> 
> You should mention where it is used with @see.
> 
>> +struct rte_dmadev_conf {
>> +	uint16_t nb_vchans;
>> +	/**< The number of virtual DMA channels to set up for the DMA device.
>> +	 * This value cannot be greater than the field 'max_vchans' of struct
>> +	 * rte_dmadev_info which get from rte_dmadev_info_get().
>> +	 */
>> +	bool enable_silent;
>> +	/**< Indicates whether to enable silent mode.
>> +	 * false-default mode, true-silent mode.
>> +	 * This value can be set to true only when the SILENT capability is
>> +	 * supported.
>> +	 *
>> +	 * @see RTE_DMADEV_CAPA_SILENT
>> +	 */
>> +};
> [...]
>> +#define RTE_DMADEV_ALL_VCHAN	0xFFFFu
> 
> Please do not add this kind of constant without a doxygen comment.
> 
> 
> It seems you don't manage the maximum number of devices.
> It is fixed in the .c file:
> 	struct rte_dmadev rte_dmadevices[RTE_DMADEV_MAX_DEVS];
> 
> Instead I would suggest a more dynamic approach with an init function,
> so the application can extend it before calling rte_eal_init.
> Please see how it is implemented here:
> https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-2-thomas@monjalon.net/
> 
> Above patch could also inspire you to start some docs in this patch.
> 
> 
> This series add one file per patch.
> Instead it would be better to have groups of features per patch,
> meaning the implementation and the driver interface should be
> in the same patch.
> You can split like this:
> 	1/ device allocation
> 	2/ configuration and start/stop
> 	3/ dataplane functions
> 
> I would suggest 2 more patches:
> 	4/ event notification
> see https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-3-thomas@monjalon.net/
> 	5/ multi-process
> see https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-5-thomas@monjalon.net/

The split mode you recommend is better.
But is this requirement particularly strong? Many acked-by and reviewed-by tags are based on the stand-alone files.
Does this re-division mean new acks/reviews are needed?

> 
> 
> Thanks for the work
> 
> 
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 12:45           ` Bruce Richardson
@ 2021-09-09 13:54             ` fengchengwen
  2021-09-09 14:26               ` Thomas Monjalon
  2021-09-09 14:28               ` Bruce Richardson
  0 siblings, 2 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-09 13:54 UTC (permalink / raw)
  To: Bruce Richardson, Thomas Monjalon
  Cc: ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev, mb,
	nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

On 2021/9/9 20:45, Bruce Richardson wrote:
> On Thu, Sep 09, 2021 at 01:29:33PM +0200, Thomas Monjalon wrote:
>> 09/09/2021 13:18, Bruce Richardson:
>>> On Thu, Sep 09, 2021 at 12:33:00PM +0200, Thomas Monjalon wrote:
>>>> 07/09/2021 14:56, Chengwen Feng:
>>>>> + * The first three APIs are used to submit the operation request to the virtual
>>>>> + * DMA channel, if the submission is successful, an uint16_t ring_idx is
>>>>> + * returned, otherwise a negative number is returned.
>>>>
>>>> unsigned or negative? looks weird.
>>>
>>> May be, but it works well. We could perhaps rephase to make it less weird
>>> though:
>>> "if the submission is successful, a positive ring_idx <= UINT16_MAX is
>>>  returned, otherwise a negative number is returned."
>>
>> I am advocating for int16_t,
>> it makes a lot of things simpler.
>>
> 
> No, it doesn't work as you can't have wrap-around of the IDs once you use
> signed values - and that impacts both the end app and the internals of the
> drivers. Let's keep it as-is otherwise it will have massive impacts -
> including potential perf impacts.
> 
>>>>> + *
>>>>> + * The last API was used to issue doorbell to hardware, and also there are flags
>>>>> + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the
>>>>> + * same work.
>>>>
>>>> I don't understand this sentence.
>>>> You mean rte_dmadev_submit function?
>>>> Why past tense "was"?
>>>> Why having a redundant function?
>>>>
>>>
>>> Just because there are two ways to do something does not mean that one of
>>> them is redundant, as both may be more suitable for different situations.
>>
>> I agree.
>>
>>> When enqueuing a set of jobs to the device, having a separate submit
>>> outside a loop makes for clearer code than having a check for the last
>>> iteration inside the loop to set a special submit flag.  However, for cases
>>> where one item alone is to be submitted or there is a small set of jobs to
>>> be submitted sequentially, having a submit flag provides a lower-overhead
>>> way of doing the submission while still keeping the code clean.
>>
>> This kind of explanation may be missing in doxygen?
>>
> 
> It can be added, sure.
> 
>>>>> +bool
>>>>> +rte_dmadev_is_valid_dev(uint16_t dev_id);
>>>>
>>>> I would suggest dropping the final "_dev" in the function name.
>>>>
>>>
>>> The alternative, which I would support, is replacing "rte_dmadev" with
>>> "rte_dma" across the API. This would then become "rte_dma_is_valid_dev"
>>> which is clearer, since the dev is not part of the standard prefix. It also
>>> would fit in with a possible future function of "rte_dma_is_valid_vchan"
>>> for instance.
>>
>> Yes
>> The question is whether it would make sense to reserve the rte_dma_ prefix
>> for some DMA functions which would be outside of dmadev lib?
>> If you think that all DMA functions will be in dmadev,
>> then yes we can shorten the prefix to rte_dma_.
>>
> 
> Well, any DPDK dma functions which are not in dma library should have the
> prefix of the library they are in e.g. rte_eal_dma_*, rte_pci_dma_*
> Therefore, I don't think name conflicts should be an issue, and I like
> having less typing to do in function names (and I believe Morten was
> strongly proposing this previously too)

The dmadev prefix is rather short; if we change it, I prefer all public APIs use the rte_dma_ prefix,
without an rte_dma_dev_ prefix for functions such as start/stop/close. (PS: rte_eth_ also
has rte_eth_dev_close, which is painful for OCD.)

Also, should the filename (e.g. rte_dmadev.h) and the directory name (lib/dmadev) change?

> 
>>>>> +uint16_t
>>>>> +rte_dmadev_count(void);
>>>>
>>>> It would be safer to name it rte_dmadev_count_avail
>>>> in case we need new kind of device count later.
>>>>
>>>
>>> If we change "dmadev" to "dma" this could then be
>>> "rte_dma_count_avail_dev".
>>
>> Yes
>>
>>>>> +/**
>>>>> + * A structure used to retrieve the information of a DMA device.
>>>>> + */
>>>>> +struct rte_dmadev_info {
>>>>> +	struct rte_device *device; /**< Generic Device information. */
>>>>
>>>> Please do not expose this.
>>>>
>>>>> +	uint64_t dev_capa; /**< Device capabilities (RTE_DMADEV_CAPA_*). */
>>>>> +	uint16_t max_vchans;
>>>>> +	/**< Maximum number of virtual DMA channels supported. */
>>>>> +	uint16_t max_desc;
>>>>> +	/**< Maximum allowed number of virtual DMA channel descriptors. */
>>>>> +	uint16_t min_desc;
>>>>> +	/**< Minimum allowed number of virtual DMA channel descriptors. */
>>>>> +	uint16_t max_sges;
>>>>> +	/**< Maximum number of source or destination scatter-gather entry
>>>>> +	 * supported.
>>>>> +	 * If the device does not support COPY_SG capability, this value can be
>>>>> +	 * zero.
>>>>> +	 * If the device supports COPY_SG capability, then rte_dmadev_copy_sg()
>>>>> +	 * parameter nb_src/nb_dst should not exceed this value.
>>>>> +	 */
>>>>> +	uint16_t nb_vchans; /**< Number of virtual DMA channel configured. */
>>>>
>>>> What about adding NUMA node?
>>>>
>>>>     /* Local NUMA memory ID. -1 if unknown. */
>>>>     int16_t numa_node;
>>>>
>>>
>>> That was omitted as it could be got through the device structure. If device
>>> is removed, we need to ensure all fields needed from device, such as numa
>>> node, are made available here.
>>
>> Ah yes, forgot about rte_device :)
>> Yes please remove rte_device from this struct.
>>
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 13:33       ` fengchengwen
@ 2021-09-09 14:19         ` Thomas Monjalon
  0 siblings, 0 replies; 339+ messages in thread
From: Thomas Monjalon @ 2021-09-09 14:19 UTC (permalink / raw)
  To: fengchengwen
  Cc: bruce.richardson, dev, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

09/09/2021 15:33, fengchengwen:
> On 2021/9/9 18:33, Thomas Monjalon wrote:
> > 07/09/2021 14:56, Chengwen Feng:
> >> + * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
> >> + * each HW-DMA-channel should be represented by a dmadev.
> >> + *
> >> + * The dmadev could create multiple virtual DMA channels, each virtual DMA
> >> + * channel represents a different transfer context. The DMA operation request
> >> + * must be submitted to the virtual DMA channel. e.g. Application could create
> >> + * virtual DMA channel 0 for memory-to-memory transfer scenario, and create
> >> + * virtual DMA channel 1 for memory-to-device transfer scenario.
> > 
> > What is the benefit of virtual channels compared to have separate dmadevs
> > for the same HW channel?
> 
> This design is from the previous discussion [1]. If a dmadev is created for each
> virtual channel, there are many associations between the dmadevs. For example,
> channel operations of some devices need to interact with the kernel, and the
> corresponding kernel operation handles would need to be shared among multiple
> dmadevs. It's going to get more complicated.
> 
> [1] https://lore.kernel.org/dpdk-dev/c4a0ee30-f7b8-f8a1-463c-8eedaec82aea@huawei.com/

OK thanks for the explanation.

[...]
> >> + * be released by rte_dmadev_pmd_release() during the PCI/SoC device removing
> >> + * phase.
> > 
> > Again what is this phase?
> > I think freeing should be done only via the "close" function.
> 
> OK
> The allocate/release logic will be reworked with reference to rte_eth_dev_pci_generic_probe.

You shouldn't always mimic ethdev; there can be some misconceptions :)
I think you don't need a PCI-specific helper.

[...]
> >> + * @note If the dmadev works in silent mode (@see RTE_DMADEV_CAPA_SILENT),
> >> + * application does not invoke the above two completed APIs.
> >> + *
> >> + * About the ring_idx which enqueue APIs (e.g. rte_dmadev_copy()
> >> + * rte_dmadev_fill()) returned, the rules are as follows:
> > 
> > I feel a word is missing above.
> 
> Can you point it out specifically?
> PS: I checked it with https://www.nounplus.net/grammarcheck/, and it suggests
> changing 'enqueue' to 'enqueues' or 'enqueued'.

After a second read, I think it is a tense problem.
What about "returned" -> "return"?

[...]
> >> +/**< DMA device support memory-to-memory transfer.
> >> + *
> >> + * @see struct rte_dmadev_info::dev_capa
> >> + */
> > 
> > It is preferred to have documentation before the item.
> 
> Is this rule particularly strong?
> I use the postfix comment style for all the doxygen comments. I think it's better
> to maintain a unified style.

In general, prefix comments are preferred.
Postfix comments are OK for short comments on the same line.

[...]
> > This series add one file per patch.
> > Instead it would be better to have groups of features per patch,
> > meaning the implementation and the driver interface should be
> > in the same patch.
> > You can split like this:
> > 	1/ device allocation
> > 	2/ configuration and start/stop
> > 	3/ dataplane functions
> > 
> > I would suggest 2 more patches:
> > 	4/ event notification
> > see https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-3-thomas@monjalon.net/
> > 	5/ multi-process
> > see https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-5-thomas@monjalon.net/
> 
> The split mode you recommend is better.
> But is this requirement particularly strong?

Yes, that's really better.

> Many acked-by and reviewed-by tags are based on the stand-alone files.
> Does this re-division mean new acks/reviews are needed?

You can keep the acks which are common to the first 4 patches, I guess,
and ask for re-acks on the others.




^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 13:54             ` fengchengwen
@ 2021-09-09 14:26               ` Thomas Monjalon
  2021-09-09 14:31                 ` Bruce Richardson
  2021-09-09 14:28               ` Bruce Richardson
  1 sibling, 1 reply; 339+ messages in thread
From: Thomas Monjalon @ 2021-09-09 14:26 UTC (permalink / raw)
  To: Bruce Richardson, fengchengwen
  Cc: dev, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

09/09/2021 15:54, fengchengwen:
> On 2021/9/9 20:45, Bruce Richardson wrote:
> > On Thu, Sep 09, 2021 at 01:29:33PM +0200, Thomas Monjalon wrote:
> >> 09/09/2021 13:18, Bruce Richardson:
> >>> On Thu, Sep 09, 2021 at 12:33:00PM +0200, Thomas Monjalon wrote:
> >>>> 07/09/2021 14:56, Chengwen Feng:
> >>>>> + * The first three APIs are used to submit the operation request to the virtual
> >>>>> + * DMA channel, if the submission is successful, an uint16_t ring_idx is
> >>>>> + * returned, otherwise a negative number is returned.
> >>>>
> >>>> unsigned or negative? looks weird.
> >>>
> >>> May be, but it works well. We could perhaps rephase to make it less weird
> >>> though:
> >>> "if the submission is successful, a positive ring_idx <= UINT16_MAX is
> >>>  returned, otherwise a negative number is returned."
> >>
> >> I am advocating for int16_t,
> >> it makes a lot of things simpler.
> > 
> > No, it doesn't work as you can't have wrap-around of the IDs once you use
> > signed values - and that impacts both the end app and the internals of the
> > drivers. Let's keep it as-is otherwise it will have massive impacts -
> > including potential perf impacts.

Not sure I understand what you mean.
Could you please explain what does not work and what the perf impact is?
I guess you want an unsigned index for the rings; then OK.
For the device ID, however, I believe a signed integer is useful.
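
For example (a sketch reusing the name-lookup helper from this series), a
signed dev_id can express "not found" in-band:

	int16_t dev_id = rte_dmadev_get_dev_id("dma0");
	if (dev_id < 0)
		return; /* no such device, no separate error flag needed */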

[...]
> >>>>> +bool
> >>>>> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> >>>>
> >>>> I would suggest dropping the final "_dev" in the function name.
> >>>
> >>> The alternative, which I would support, is replacing "rte_dmadev" with
> >>> "rte_dma" across the API. This would then become "rte_dma_is_valid_dev"
> >>> which is clearer, since the dev is not part of the standard prefix. It also
> >>> would fit in with a possible future function of "rte_dma_is_valid_vchan"
> >>> for instance.
> >>
> >> Yes
> >> The question is whether it would make sense to reserve the rte_dma_ prefix
> >> for some DMA functions which would be outside of dmadev lib?
> >> If you think that all DMA functions will be in dmadev,
> >> then yes we can shorten the prefix to rte_dma_.
> >>
> > 
> > Well, any DPDK dma functions which are not in dma library should have the
> > prefix of the library they are in e.g. rte_eal_dma_*, rte_pci_dma_*

Quite often we skip the eal_ prefix; that's why I was thinking about
a possible namespace conflict. Anyway, it could be managed.

> > Therefore, I don't think name conflicts should be an issue, and I like
> > having less typing to do in function names (and I believe Morten was
> > strongly proposing this previously too)
> 
> The dmadev prefix is rather short; if we change it, I prefer all public APIs use the rte_dma_ prefix,
> without an rte_dma_dev_ prefix for functions such as start/stop/close. (PS: rte_eth_ also
> has rte_eth_dev_close, which is painful for OCD.)

Yes OK for rte_dma_ prefix everywhere.

> Also, should the filename (e.g. rte_dmadev.h) and the directory name (lib/dmadev) change?

I believe it's better to keep dmadev as the name of the lib and the filename,
so it's consistent with other device classes.
What are the other opinions?




^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 13:54             ` fengchengwen
  2021-09-09 14:26               ` Thomas Monjalon
@ 2021-09-09 14:28               ` Bruce Richardson
  2021-09-09 15:12                 ` Morten Brørup
  1 sibling, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-09-09 14:28 UTC (permalink / raw)
  To: fengchengwen
  Cc: Thomas Monjalon, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

On Thu, Sep 09, 2021 at 09:54:27PM +0800, fengchengwen wrote:
> On 2021/9/9 20:45, Bruce Richardson wrote:
> > On Thu, Sep 09, 2021 at 01:29:33PM +0200, Thomas Monjalon wrote:
> >> 09/09/2021 13:18, Bruce Richardson:
> >>> On Thu, Sep 09, 2021 at 12:33:00PM +0200, Thomas Monjalon wrote:
> >>>> 07/09/2021 14:56, Chengwen Feng:
> >>>>> + * The first three APIs are used to submit the operation request to the virtual
> >>>>> + * DMA channel, if the submission is successful, an uint16_t ring_idx is
> >>>>> + * returned, otherwise a negative number is returned.
> >>>>
> >>>> unsigned or negative? looks weird.
> >>>
> >>> May be, but it works well. We could perhaps rephase to make it less weird
> >>> though:
> >>> "if the submission is successful, a positive ring_idx <= UINT16_MAX is
> >>>  returned, otherwise a negative number is returned."
> >>
> >> I am advocating for int16_t,
> >> it makes a lot of things simpler.
> >>
> > 
> > No, it doesn't work as you can't have wrap-around of the IDs once you use
> > signed values - and that impacts both the end app and the internals of the
> > drivers. Let's keep it as-is otherwise it will have massive impacts -
> > including potential perf impacts.
> > 
> >>>>> + *
> >>>>> + * The last API was used to issue doorbell to hardware, and also there are flags
> >>>>> + * (@see RTE_DMA_OP_FLAG_SUBMIT) parameter of the first three APIs could do the
> >>>>> + * same work.
> >>>>
> >>>> I don't understand this sentence.
> >>>> You mean rte_dmadev_submit function?
> >>>> Why past tense "was"?
> >>>> Why having a redundant function?
> >>>>
> >>>
> >>> Just because there are two ways to do something does not mean that one of
> >>> them is redundant, as both may be more suitable for different situations.
> >>
> >> I agree.
> >>
> >>> When enqueuing a set of jobs to the device, having a separate submit
> >>> outside a loop makes for clearer code than having a check for the last
> >>> iteration inside the loop to set a special submit flag.  However, for cases
> >>> where one item alone is to be submitted or there is a small set of jobs to
> >>> be submitted sequentially, having a submit flag provides a lower-overhead
> >>> way of doing the submission while still keeping the code clean.
> >>
> >> This kind of explanation may be missing in doxygen?
> >>
> > 
> > It can be added, sure.
> > 
> >>>>> +bool
> >>>>> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> >>>>
> >>>> I would suggest dropping the final "_dev" in the function name.
> >>>>
> >>>
> >>> The alternative, which I would support, is replacing "rte_dmadev" with
> >>> "rte_dma" across the API. This would then become "rte_dma_is_valid_dev"
> >>> which is clearer, since the dev is not part of the standard prefix. It also
> >>> would fit in with a possible future function of "rte_dma_is_valid_vchan"
> >>> for instance.
> >>
> >> Yes
> >> The question is whether it would make sense to reserve the rte_dma_ prefix
> >> for some DMA functions which would be outside of dmadev lib?
> >> If you think that all DMA functions will be in dmadev,
> >> then yes we can shorten the prefix to rte_dma_.
> >>
> > 
> > Well, any DPDK dma functions which are not in dma library should have the
> > prefix of the library they are in e.g. rte_eal_dma_*, rte_pci_dma_*
> > Therefore, I don't think name conflicts should be an issue, and I like
> > having less typing to do in function names (and I believe Morten was
> > strongly proposing this previously too)
> 
> The dmadev prefix is rather short; if we change it, I prefer all public APIs use the rte_dma_ prefix,
> without an rte_dma_dev_ prefix for functions such as start/stop/close. (PS: rte_eth_ also
> has rte_eth_dev_close, which is painful for OCD.)

I agree that having rte_dma_dev_* is unpleasant naming for those functions,
so if we use rte_dma_ as prefix, any dev should be at the end instead:
i.e. rte_dma_stop_dev, rte_dma_start_dev, rte_dma_close_dev, etc.

> 
> Also, should the filename (e.g. rte_dmadev.h) and the directory name (lib/dmadev) change?
> 
I would keep those names intact.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 14:26               ` Thomas Monjalon
@ 2021-09-09 14:31                 ` Bruce Richardson
  0 siblings, 0 replies; 339+ messages in thread
From: Bruce Richardson @ 2021-09-09 14:31 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: fengchengwen, dev, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

On Thu, Sep 09, 2021 at 04:26:40PM +0200, Thomas Monjalon wrote:
> 09/09/2021 15:54, fengchengwen:
> > On 2021/9/9 20:45, Bruce Richardson wrote:
> > > On Thu, Sep 09, 2021 at 01:29:33PM +0200, Thomas Monjalon wrote:
> > >> 09/09/2021 13:18, Bruce Richardson:
> > >>> On Thu, Sep 09, 2021 at 12:33:00PM +0200, Thomas Monjalon wrote:
> > >>>> 07/09/2021 14:56, Chengwen Feng:
> > >>>>> + * The first three APIs are used to submit the operation request to the virtual
> > >>>>> + * DMA channel, if the submission is successful, an uint16_t ring_idx is
> > >>>>> + * returned, otherwise a negative number is returned.
> > >>>>
> > >>>> unsigned or negative? looks weird.
> > >>>
> > >>> May be, but it works well. We could perhaps rephase to make it less weird
> > >>> though:
> > >>> "if the submission is successful, a positive ring_idx <= UINT16_MAX is
> > >>>  returned, otherwise a negative number is returned."
> > >>
> > >> I am advocating for int16_t,
> > >> it makes a lot of things simpler.
> > > 
> > > No, it doesn't work as you can't have wrap-around of the IDs once you use
> > > signed values - and that impacts both the end app and the internals of the
> > > drivers. Let's keep it as-is otherwise it will have massive impacts -
> > > including potential perf impacts.
> 
> Not sure I understand what you mean.
> Could you please explain what does not work and what the perf impact is?
> I guess you want an unsigned index for the rings; then OK.

Yes, that is it.

> For the device ID, however, I believe a signed integer is useful.

No objection to that.

> 
> [...]
> > >>>>> +bool
> > >>>>> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> > >>>>
> > >>>> I would suggest dropping the final "_dev" in the function name.
> > >>>
> > >>> The alternative, which I would support, is replacing "rte_dmadev" with
> > >>> "rte_dma" across the API. This would then become "rte_dma_is_valid_dev"
> > >>> which is clearer, since the dev is not part of the standard prefix. It also
> > >>> would fit in with a possible future function of "rte_dma_is_valid_vchan"
> > >>> for instance.
> > >>
> > >> Yes
> > >> The question is whether it would make sense to reserve the rte_dma_ prefix
> > >> for some DMA functions which would be outside of dmadev lib?
> > >> If you think that all DMA functions will be in dmadev,
> > >> then yes we can shorten the prefix to rte_dma_.
> > >>
> > > 
> > > Well, any DPDK dma functions which are not in dma library should have the
> > > prefix of the library they are in e.g. rte_eal_dma_*, rte_pci_dma_*
> 
> Quite often we skip the eal_ prefix; that's why I was thinking about
> a possible namespace conflict. Anyway, it could be managed.
> 
> > > Therefore, I don't think name conflicts should be an issue, and I like
> > > having less typing to do in function names (and I believe Morten was
> > > strongly proposing this previously too)
> > 
> > The dmadev prefix is rather short; if we change it, I prefer all public APIs use the rte_dma_ prefix,
> > without an rte_dma_dev_ prefix for functions such as start/stop/close. (PS: rte_eth_ also
> > has rte_eth_dev_close, which is painful for OCD.)
> 
> Yes OK for rte_dma_ prefix everywhere.
> 
> > Also, should the filename (e.g. rte_dmadev.h) and the directory name (lib/dmadev) change?
> 
> I believe it's better to keep dmadev as the name of the lib and the filename,
> so it's consistent with other device classes.
> What are the other opinions?

Definitely keep. It's one thing to have additional characters in the header
name, another to have them in the APIs.

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 14:28               ` Bruce Richardson
@ 2021-09-09 15:12                 ` Morten Brørup
  0 siblings, 0 replies; 339+ messages in thread
From: Morten Brørup @ 2021-09-09 15:12 UTC (permalink / raw)
  To: Bruce Richardson, fengchengwen
  Cc: Thomas Monjalon, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Bruce Richardson
> Sent: Thursday, 9 September 2021 16.29
> 
> On Thu, Sep 09, 2021 at 09:54:27PM +0800, fengchengwen wrote:
> > On 2021/9/9 20:45, Bruce Richardson wrote:
> > > On Thu, Sep 09, 2021 at 01:29:33PM +0200, Thomas Monjalon wrote:
> > >> 09/09/2021 13:18, Bruce Richardson:
> > >>> On Thu, Sep 09, 2021 at 12:33:00PM +0200, Thomas Monjalon wrote:
> > >>>> 07/09/2021 14:56, Chengwen Feng:

[snip]

> > >>>>> +bool
> > >>>>> +rte_dmadev_is_valid_dev(uint16_t dev_id);
> > >>>>
> > >>>> I would suggest dropping the final "_dev" in the function name.
> > >>>>
> > >>>
> > >>> The alternative, which I would support, is replacing "rte_dmadev" with
> > >>> "rte_dma" across the API. This would then become "rte_dma_is_valid_dev"
> > >>> which is clearer, since the dev is not part of the standard prefix. It also
> > >>> would fit in with a possible future function of "rte_dma_is_valid_vchan"
> > >>> for instance.
> > >>
> > >> Yes
> > >> The question is whether it would make sense to reserve the rte_dma_ prefix
> > >> for some DMA functions which would be outside of dmadev lib?
> > >> If you think that all DMA functions will be in dmadev,
> > >> then yes we can shorten the prefix to rte_dma_.
> > >>
> > >
> > > Well, any DPDK dma functions which are not in dma library should have the
> > > prefix of the library they are in e.g. rte_eal_dma_*, rte_pci_dma_*
> > > Therefore, I don't think name conflicts should be an issue, and I like
> > > having less typing to do in function names (and I believe Morten was
> > > strongly proposing this previously too)
> >
> > The dmadev prefix is rather short; if we change it, I prefer all public APIs
> > use the rte_dma_ prefix, without an rte_dma_dev_ prefix for functions such as
> > start/stop/close. (PS: rte_eth_ also has rte_eth_dev_close, which is painful
> > for OCD.)
> 
> I agree that having rte_dma_dev_* is unpleasant naming for those functions,
> so if we use rte_dma_ as prefix, any dev should be at the end instead:
> i.e. rte_dma_stop_dev, rte_dma_start_dev, rte_dma_close_dev, etc.
> 

I agree about using rte_dma_ as the general prefix.

But I disagree about rte_dma_<action>_<object>() function names, such as rte_dma_stop_dev().

We should follow the convention of rte_dma_<object>_<action>(), like in the ethdev library, e.g. rte_eth_dev_get(), rte_eth_fec_get_capability().

Or simply rte_dma_<action>(), if the object is obvious and can be omitted.

I.e. rte_dma_dev_stop() or rte_dma_stop().

> >
> > Also, should the filename (e.g. rte_dmadev.h) and the directory name (lib/dmadev) change?
> >
> I would keep those names intact.

Keep intact, as Bruce suggests. This also aligns with the ethdev library.

> 
> /Bruce


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation
  2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
  2021-09-08  9:54     ` Walsh, Conor
@ 2021-09-15 13:51     ` Kevin Laatz
  2021-09-15 14:34       ` Bruce Richardson
  1 sibling, 1 reply; 339+ messages in thread
From: Kevin Laatz @ 2021-09-15 13:51 UTC (permalink / raw)
  To: Chengwen Feng, thomas, ferruh.yigit, bruce.richardson, jerinj,
	jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh

On 07/09/2021 13:56, Chengwen Feng wrote:
> This patch introduce DMA device library implementation which includes
> configuration and I/O with the DMA devices.
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> ---
>   config/rte_config.h          |   3 +
>   lib/dmadev/meson.build       |   1 +
>   lib/dmadev/rte_dmadev.c      | 607 +++++++++++++++++++++++++++++++++++
>   lib/dmadev/rte_dmadev.h      | 118 ++++++-
>   lib/dmadev/rte_dmadev_core.h |   2 +
>   lib/dmadev/version.map       |   1 +
>   6 files changed, 720 insertions(+), 12 deletions(-)
>   create mode 100644 lib/dmadev/rte_dmadev.c
>
[snip]

>   /**
>    * @warning
> @@ -941,10 +1018,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
>    *   status array are also set.
>    */
>   __rte_experimental
> -uint16_t
> +static inline uint16_t
>   rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
>   			    const uint16_t nb_cpls, uint16_t *last_idx,
> -			    enum rte_dma_status_code *status);
> +			    enum rte_dma_status_code *status)
> +{
> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +	uint16_t idx;
> +
> +#ifdef RTE_DMADEV_DEBUG
> +	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
> +	    vchan >= dev->data->dev_conf.nb_vchans ||
> +	    nb_cpls == 0 || status == NULL)
> +		return 0;
> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
> +#endif
> +
> +	if (last_idx == NULL)
> +		last_idx = &idx;

Hi Chengwen,

An internal coverity scan on the IDXD dmadev driver patches flagged a 
potential null pointer dereference when using completed_status().

IMO it is a false positive for the driver code, since it should be
checked at the library API level; however, the check is also not present
in the library.

For the v22, can you add the NULL pointer check for status here, like 
you have for last_idx, please?

/Kevin

> +
> +	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
> +}
>   
>   #ifdef __cplusplus
>   }
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation
  2021-09-15 13:51     ` Kevin Laatz
@ 2021-09-15 14:34       ` Bruce Richardson
  2021-09-15 14:47         ` Kevin Laatz
  0 siblings, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-09-15 14:34 UTC (permalink / raw)
  To: Kevin Laatz
  Cc: Chengwen Feng, thomas, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh

On Wed, Sep 15, 2021 at 02:51:55PM +0100, Kevin Laatz wrote:
> On 07/09/2021 13:56, Chengwen Feng wrote:
> > This patch introduce DMA device library implementation which includes
> > configuration and I/O with the DMA devices.
> > 
> > Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> > Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> > Acked-by: Morten Brørup <mb@smartsharesystems.com>
> > Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> > Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> > ---
> >   config/rte_config.h          |   3 +
> >   lib/dmadev/meson.build       |   1 +
> >   lib/dmadev/rte_dmadev.c      | 607 +++++++++++++++++++++++++++++++++++
> >   lib/dmadev/rte_dmadev.h      | 118 ++++++-
> >   lib/dmadev/rte_dmadev_core.h |   2 +
> >   lib/dmadev/version.map       |   1 +
> >   6 files changed, 720 insertions(+), 12 deletions(-)
> >   create mode 100644 lib/dmadev/rte_dmadev.c
> > 
> [snip]
> 
> >   /**
> >    * @warning
> > @@ -941,10 +1018,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
> >    *   status array are also set.
> >    */
> >   __rte_experimental
> > -uint16_t
> > +static inline uint16_t
> >   rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
> >   			    const uint16_t nb_cpls, uint16_t *last_idx,
> > -			    enum rte_dma_status_code *status);
> > +			    enum rte_dma_status_code *status)
> > +{
> > +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> > +	uint16_t idx;
> > +
> > +#ifdef RTE_DMADEV_DEBUG
> > +	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
> > +	    vchan >= dev->data->dev_conf.nb_vchans ||
> > +	    nb_cpls == 0 || status == NULL)
> > +		return 0;
> > +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
> > +#endif
> > +
> > +	if (last_idx == NULL)
> > +		last_idx = &idx;
> 
> Hi Chengwen,
> 
> An internal coverity scan on the IDXD dmadev driver patches flagged a
> potential null pointer dereference when using completed_status().
> 
> IMO it is a false positive for the driver code since it should be checked at
> the library API level, however the check is also not present in the library.
> 
> For the v22, can you add the NULL pointer check for status here, like you
> have for last_idx, please?
> 
I think the check would have to be different from that for last_idx, since
the status pointer is a pointer to an array, rather than a single value -
which precludes a simple replacement in the wrapper function that the
compiler can inline away if unnecessary.
It's probably best to add it as a check in the debug block, with an
error-return if status is NULL.
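
Roughly, as comments on the v21 wrapper quoted above (a sketch, not final
code; the elided conditions are as quoted):

#ifdef RTE_DMADEV_DEBUG
	/* status points at an array, so there is no cheap local stand-in:
	 * validate it here and error-return (0 completions) when NULL. */
	if (... || status == NULL)
		return 0;
#endif

	/* last_idx is a single out-value, so a local stand-in lets drivers
	 * dereference it unconditionally. */
	if (last_idx == NULL)
		last_idx = &idx;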

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation
  2021-09-15 14:34       ` Bruce Richardson
@ 2021-09-15 14:47         ` Kevin Laatz
  0 siblings, 0 replies; 339+ messages in thread
From: Kevin Laatz @ 2021-09-15 14:47 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: Chengwen Feng, thomas, ferruh.yigit, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh

On 15/09/2021 15:34, Bruce Richardson wrote:
> On Wed, Sep 15, 2021 at 02:51:55PM +0100, Kevin Laatz wrote:
>> On 07/09/2021 13:56, Chengwen Feng wrote:
>>> This patch introduce DMA device library implementation which includes
>>> configuration and I/O with the DMA devices.
>>>
>>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>>> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
>>> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
>>> ---
>>>    config/rte_config.h          |   3 +
>>>    lib/dmadev/meson.build       |   1 +
>>>    lib/dmadev/rte_dmadev.c      | 607 +++++++++++++++++++++++++++++++++++
>>>    lib/dmadev/rte_dmadev.h      | 118 ++++++-
>>>    lib/dmadev/rte_dmadev_core.h |   2 +
>>>    lib/dmadev/version.map       |   1 +
>>>    6 files changed, 720 insertions(+), 12 deletions(-)
>>>    create mode 100644 lib/dmadev/rte_dmadev.c
>>>
>> [snip]
>>
>>>    /**
>>>     * @warning
>>> @@ -941,10 +1018,27 @@ rte_dmadev_completed(uint16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
>>>     *   status array are also set.
>>>     */
>>>    __rte_experimental
>>> -uint16_t
>>> +static inline uint16_t
>>>    rte_dmadev_completed_status(uint16_t dev_id, uint16_t vchan,
>>>    			    const uint16_t nb_cpls, uint16_t *last_idx,
>>> -			    enum rte_dma_status_code *status);
>>> +			    enum rte_dma_status_code *status)
>>> +{
>>> +	struct rte_dmadev *dev = &rte_dmadevices[dev_id];
>>> +	uint16_t idx;
>>> +
>>> +#ifdef RTE_DMADEV_DEBUG
>>> +	if (!rte_dmadev_is_valid_dev(dev_id) || !dev->data->dev_started ||
>>> +	    vchan >= dev->data->dev_conf.nb_vchans ||
>>> +	    nb_cpls == 0 || status == NULL)
>>> +		return 0;
>>> +	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
>>> +#endif
>>> +
>>> +	if (last_idx == NULL)
>>> +		last_idx = &idx;
>> Hi Chengwen,
>>
>> An internal coverity scan on the IDXD dmadev driver patches flagged a
>> potential null pointer dereference when using completed_status().
>>
>> IMO it is a false positive for the driver code since it should be checked at
>> the library API level, however the check is also not present in the library.
>>
>> For the v22, can you add the NULL pointer check for status here, like you
>> have for last_idx, please?
>>
> I think the check would have to be different from that for last_idx, since
> the status pointer is a pointer to an array, rather than a single value -
> which precludes a simple replacement in the wrapper function that the
> compiler can inline away if unnecessary.
> It's probably best to add it as a check in the debug block, with an
> error-return if status is NULL.
+1

/Kevin


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v22 0/5] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (24 preceding siblings ...)
  2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
@ 2021-09-16  3:41 ` Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 1/5] dmadev: introduce DMA device library Chengwen Feng
                     ` (4 more replies)
  2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
                   ` (3 subsequent siblings)
  29 siblings, 5 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-16  3:41 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch set contains five patches adding the new dmadev library.

Chengwen Feng (5):
  dmadev: introduce DMA device library
  dmadev: add control plane function support
  dmadev: add data plane function support
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test

---
v22:
* function prefix changed from rte_dmadev_* to rte_dma_*.
* switched to prefix comments in most scenarios.
* dmadev dev_id now uses the int16_t type.
* fixed typos.
* organized the patchset in incremental mode.
v21:
* add comment for reserved fields of struct rte_dmadev.
v20:
* deleted unnecessary and duplicate include header files.
* added the conf_sz parameter to the configure and vchan-setup
  callbacks of the PMD; this is mainly used to enhance ABI
  compatibility.
* rearranged the rte_dmadev structure fields to reserve more space
  for I/O functions.
* fixed some ambiguous and unnecessary comments.
* fixed a potential memory leak in the unit test.
* redefined skeldma_init_once as skeldma_count.
* suppressed rte_dmadev error output when executing the unit test.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   43 +
 app/test/test_dmadev_api.c             |  539 ++++++++++++
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/dmadevs/index.rst           |   12 +
 doc/guides/index.rst                   |    1 +
 doc/guides/prog_guide/dmadev.rst       |  125 +++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    5 +
 drivers/dma/meson.build                |    6 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  571 +++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   61 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  718 ++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1073 ++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  184 ++++
 lib/dmadev/rte_dmadev_pmd.h            |   60 ++
 lib/dmadev/version.map                 |   35 +
 lib/meson.build                        |    1 +
 26 files changed, 3752 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v22 1/5] dmadev: introduce DMA device library
  2021-09-16  3:41 ` [dpdk-dev] [PATCH v22 0/5] support dmadev Chengwen Feng
@ 2021-09-16  3:41   ` Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 2/5] dmadev: add control plane function support Chengwen Feng
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-16  3:41 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' device allocation APIs and their
multi-process support.

The infrastructure is prepared to welcome drivers in drivers/dma/.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   5 +
 config/rte_config.h                    |   3 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/dmadevs/index.rst           |  12 +
 doc/guides/index.rst                   |   1 +
 doc/guides/prog_guide/dmadev.rst       |  62 +++++
 doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++
 doc/guides/prog_guide/index.rst        |   1 +
 doc/guides/rel_notes/release_21_11.rst |   4 +
 drivers/dma/meson.build                |   4 +
 drivers/meson.build                    |   1 +
 lib/dmadev/meson.build                 |   7 +
 lib/dmadev/rte_dmadev.c                | 358 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 133 +++++++++
 lib/dmadev/rte_dmadev_core.h           |  83 ++++++
 lib/dmadev/rte_dmadev_pmd.h            |  60 +++++
 lib/dmadev/version.map                 |  20 ++
 lib/meson.build                        |   1 +
 19 files changed, 1040 insertions(+)
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 1e0d303394..8af9522a5a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -452,6 +452,11 @@ F: app/test-regex/
 F: doc/guides/prog_guide/regexdev.rst
 F: doc/guides/regexdevs/features/default.ini
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 Eventdev API
 M: Jerin Jacob <jerinj@marvell.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c07d..6e397a62ab 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -70,6 +70,9 @@
 /* regexdev defines */
 #define RTE_MAX_REGEXDEV_DEVS 32
 
+/* dmadev defines */
+#define RTE_DMADEV_DEFAULT_MAX_DEVS 64
+
 /* eventdev defines */
 #define RTE_EVENT_MAX_DEVS 16
 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107a03..2939050431 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -21,6 +21,7 @@ The public API headers are grouped by topics:
   [compressdev]        (@ref rte_compressdev.h),
   [compress]           (@ref rte_comp.h),
   [regexdev]           (@ref rte_regexdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [eventdev]           (@ref rte_eventdev.h),
   [event_eth_rx_adapter]   (@ref rte_event_eth_rx_adapter.h),
   [event_eth_tx_adapter]   (@ref rte_event_eth_tx_adapter.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a0195c6..109ec1f682 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/doc/guides/dmadevs/index.rst b/doc/guides/dmadevs/index.rst
new file mode 100644
index 0000000000..0bce29d766
--- /dev/null
+++ b/doc/guides/dmadevs/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Drivers
+==================
+
+The following is a list of DMA device drivers, which can be used from
+an application through the DMA API.
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 857f0363d3..919825992e 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -21,6 +21,7 @@ DPDK documentation
    compressdevs/index
    vdpadevs/index
    regexdevs/index
+   dmadevs/index
    eventdevs/index
    rawdevs/index
    mempool/index
diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000000..c1c7579107
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,62 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. It provides a generic DMA
+device framework which supports both physical (hardware) and virtual
+(software) DMA devices, as well as a generic DMA API which allows DMA devices
+to be managed and configured, and which allows DMA operations to be
+provisioned on a DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (also known
+   as hardware DMA queues); each hardware DMA channel should be represented
+   by a dmadev.
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+   DMA channel represents a different transfer context. DMA operation
+   requests must be submitted to a virtual DMA channel. For example, an
+   application could create virtual DMA channel 0 for memory-to-memory
+   transfers and virtual DMA channel 1 for memory-to-device transfers.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration phase
+of the EAL, which is executed at DPDK initialization, based on their PCI BDF
+(bus/bridge, device, function). Specific physical DMA controllers, like other
+physical devices in DPDK, can be listed using the EAL command line options.
+
+The dmadevs are dynamically allocated by using the API
+``rte_dma_pmd_allocate`` based on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
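
As a usage illustration of these two identifiers (a minimal sketch, not part
of the patch; the device name is application-specific and assumed here):

#include <stdio.h>
#include <rte_dmadev.h>

static int16_t
lookup_dma(const char *name)
{
	int ret;

	/* Resolve the console/debug name to the device index used by
	 * all other dmadev API calls. */
	ret = rte_dma_get_dev_id(name);
	if (ret < 0) {
		fprintf(stderr, "no DMA device named %s\n", name);
		return -1;
	}
	if (!rte_dma_is_valid((int16_t)ret))
		return -1;

	printf("%s -> dev_id %d (%u devices available)\n",
	       name, ret, (unsigned int)rte_dma_count_avail());
	return (int16_t)ret;
}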
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000000..157d7eb7dc
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507f46..89af28dacb 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -27,6 +27,7 @@ Programmer's Guide
     cryptodev_lib
     compressdev
     regexdev
+    dmadev
     rte_security
     rawdev
     link_bonding_poll_mode_drv_lib
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 43d367bcad..5a85198e0d 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -87,6 +87,10 @@ New Features
   Added command-line options to specify total number of processes and
   current process ID. Each process owns subset of Rx and Tx queues.
 
+* **Introduced dmadev library with:**
+
+  * Device allocation and its multi-process support.
+
 
 Removed Items
 -------------
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000000..a24c56d8ff
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2021 HiSilicon Limited
+
+drivers = []
diff --git a/drivers/meson.build b/drivers/meson.build
index d9e331ec85..a390787d6a 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000000..d2fc85e8c7
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000000..12d8302f15
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dma_dev *rte_dma_devices;
+static int16_t dma_devices_max;
+static struct {
+	/* Hold the dev_max information of the primary process. This field is
+	 * set by the primary process and is read by the secondary process.
+	 */
+	int16_t dev_max;
+	struct rte_dma_dev_data data[0];
+} *dma_devices_shared_data;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
+#define RTE_DMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rte_dma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Macros to check for valid device id */
+#define RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dma_is_valid(dev_id)) { \
+		RTE_DMA_LOG(ERR, "Invalid dev_id=%d", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+int
+rte_dma_dev_max(size_t dev_max)
+{
+	/* This function may be called before rte_eal_init(), so no rte library
+	 * function can be called in this function.
+	 */
+	if (dev_max == 0 || dev_max > INT16_MAX)
+		return -EINVAL;
+
+	if (dma_devices_max > 0)
+		return -EINVAL;
+
+	dma_devices_max = dev_max;
+
+	return 0;
+}
+
+static int
+dma_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMA_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMA_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
+		RTE_DMA_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int16_t
+dma_find_free_dev(void)
+{
+	int16_t i;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return -1;
+}
+
+static struct rte_dma_dev*
+dma_find(const char *name)
+{
+	int16_t i;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
+		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
+			return &rte_dma_devices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dma_process_data_prepare(void)
+{
+	size_t size;
+	void *ptr;
+
+	if (rte_dma_devices != NULL)
+		return 0;
+
+	/* The return value of malloc may not be aligned to the cache line.
+	 * Therefore, extra memory is allocated so the pointer can be realigned.
+	 * Note: we do not call posix_memalign/aligned_alloc because their
+	 * availability depends on the libc version.
+	 */
+	size = dma_devices_max * sizeof(struct rte_dma_dev) +
+		RTE_CACHE_LINE_SIZE;
+	ptr = malloc(size);
+	if (ptr == NULL)
+		return -ENOMEM;
+	memset(ptr, 0, size);
+
+	rte_dma_devices = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
+
+	return 0;
+}
+
+static int
+dma_shared_data_prepare(void)
+{
+	const char *mz_name = "rte_dma_dev_data";
+	const struct rte_memzone *mz;
+	size_t size;
+
+	if (dma_devices_shared_data != NULL)
+		return 0;
+
+	size = sizeof(*dma_devices_shared_data) +
+		sizeof(struct rte_dma_dev_data) * dma_devices_max;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
+	else
+		mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL)
+		return -ENOMEM;
+
+	dma_devices_shared_data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dma_devices_shared_data, 0, size);
+		dma_devices_shared_data->dev_max = dma_devices_max;
+	} else {
+		dma_devices_max = dma_devices_shared_data->dev_max;
+	}
+
+	return 0;
+}
+
+static int
+dma_data_prepare(void)
+{
+	int ret;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (dma_devices_max == 0)
+			dma_devices_max = RTE_DMADEV_DEFAULT_MAX_DEVS;
+		ret = dma_process_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+	} else {
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_process_data_prepare();
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static struct rte_dma_dev *
+dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+	void *dev_private;
+	int16_t dev_id;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	dev = dma_find(name);
+	if (dev != NULL) {
+		RTE_DMA_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	dev_private = rte_zmalloc_socket(name, private_data_size,
+					 RTE_CACHE_LINE_SIZE, numa_node);
+	if (dev_private == NULL) {
+		RTE_DMA_LOG(ERR, "Cannot allocate private data");
+		return NULL;
+	}
+
+	dev_id = dma_find_free_dev();
+	if (dev_id < 0) {
+		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
+		rte_free(dev_private);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[dev_id];
+	dev->dev_private = dev_private;
+	dev->data = &dma_devices_shared_data->data[dev_id];
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+	dev->data->dev_id = dev_id;
+	dev->data->numa_node = numa_node;
+	dev->data->dev_private = dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_attach_secondary(const char *name)
+{
+	struct rte_dma_dev *dev;
+	int16_t i;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == dma_devices_max) {
+		RTE_DMA_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[i];
+	dev->data = &dma_devices_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dma_allocate_primary(name, numa_node, private_data_size);
+	else
+		dev = dma_attach_secondary(name);
+
+	return dev;
+}
+
+static void
+dma_release(struct rte_dma_dev *dev)
+{
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
+		rte_free(dev->dev_private);
+	}
+
+	memset(dev, 0, sizeof(struct rte_dma_dev));
+}
+
+struct rte_dma_dev *
+rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0 || private_data_size == 0)
+		return NULL;
+
+	dev = dma_allocate(name, numa_node, private_data_size);
+	if (dev == NULL)
+		return NULL;
+
+	dev->state = RTE_DMA_DEV_REGISTERED;
+
+	return dev;
+}
+
+int
+rte_dma_pmd_release(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	dma_release(dev);
+	return 0;
+}
+
+int
+rte_dma_get_dev_id(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	return dev->data->dev_id;
+}
+
+bool
+rte_dma_is_valid(int16_t dev_id)
+{
+	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
+		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
+}
+
+uint16_t
+rte_dma_count_avail(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
+			count++;
+	}
+
+	return count;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000000..6074bae25d
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021 Marvell International Ltd
+ * Copyright(c) 2021 SmartShare Systems
+ */
+
+#ifndef RTE_DMADEV_H
+#define RTE_DMADEV_H
+
+/**
+ * @file rte_dmadev.h
+ *
+ * DMA (Direct Memory Access) device API.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW DMA channel |               | HW DMA channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW DMA Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, where each virtual
+ * DMA channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. For example, an application
+ * could create virtual DMA channel 0 for memory-to-memory transfers and
+ * virtual DMA channel 1 for memory-to-device transfers.
+ *
+ * The dmadevs are dynamically allocated by rte_dma_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dma_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ */
+
+#include <stdint.h>
+
+#include <rte_bitops.h>
+#include <rte_common.h>
+#include <rte_compat.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure the maximum number of dmadevs.
+ * @note This function can be invoked before rte_eal_init() in the primary
+ * process to change the maximum number of dmadevs.
+ *
+ * @param dev_max
+ *   maximum number of dmadevs.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dev_max(size_t dev_max);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int rte_dma_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool rte_dma_is_valid(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t rte_dma_count_avail(void);
+
+#include "rte_dmadev_core.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_H */
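
A usage sketch for rte_dma_dev_max() as documented above (a sketch only, not
part of the patch; assumes the primary process, and 128 is an arbitrary value
within the int16_t bound that the function enforces):

#include <stdio.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_dmadev.h>

int
main(int argc, char **argv)
{
	int ret;

	/* Must be called before rte_eal_init() in the primary process. */
	ret = rte_dma_dev_max(128);
	if (ret < 0)
		return EXIT_FAILURE;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "cannot init EAL\n");

	printf("%u DMA devices available\n",
	       (unsigned int)rte_dma_count_avail());
	return 0;
}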
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000000..1ce2cf0bf1
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef RTE_DMADEV_CORE_H
+#define RTE_DMADEV_CORE_H
+
+/**
+ * @file
+ *
+ * DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+#include <rte_dev.h>
+
+/**
+ * Possible states of a DMA device.
+ *
+ * @see struct rte_dma_dev::state
+ */
+enum rte_dma_dev_state {
+	RTE_DMA_DEV_UNUSED = 0, /**< Device is unused. */
+	/** Device is registered, but not ready to be used. */
+	RTE_DMA_DEV_REGISTERED,
+	/** Device is ready for use. This is set by the PMD. */
+	RTE_DMA_DEV_READY,
+};
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ *
+ * @see struct rte_dma_dev::data
+ */
+struct rte_dma_dev_data {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	/** PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in the 'struct rte_dma_dev'
+	 * from the primary process; it is used by the secondary process to get
+	 * the dev_private information.
+	 */
+	void *dev_private;
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointers
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance because the PMD mainly depends on this field.
+ */
+struct rte_dma_dev {
+	void *dev_private; /**< PMD-specific private data. */
+	struct rte_dma_dev_data *data; /**< Pointer to device data. */
+	/** Device info which supplied during device initialization. */
+	struct rte_device *device;
+	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dma_dev *rte_dma_devices;
+
+#endif /* RTE_DMADEV_CORE_H */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000000..02281c74fd
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef RTE_DMADEV_PMD_H
+#define RTE_DMADEV_PMD_H
+
+/**
+ * @file
+ *
+ * DMA Device PMD APIs
+ *
+ * Driver facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ * @param numa_node
+ *   NUMA node on which to allocate the driver's private data.
+ * @param private_data_size
+ *   Driver's private data size.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dma_dev *rte_dma_pmd_allocate(const char *name, int numa_node,
+					 size_t private_data_size);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   - 0 on success, negative on error.
+ */
+__rte_internal
+int rte_dma_pmd_release(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_PMD_H */
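
For illustration, a hypothetical driver probe built on the two functions
above (every 'mydma_' name is invented; only rte_dma_pmd_allocate() and the
rte_dma_dev fields come from the patch):

#include <errno.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_dmadev_pmd.h>

/* Hypothetical per-device private data for an imaginary PMD. */
struct mydma_private {
	uint16_t nb_desc;
};

static int
mydma_probe(struct rte_device *rte_dev)
{
	struct rte_dma_dev *dev;

	dev = rte_dma_pmd_allocate(rte_dev->name, rte_dev->numa_node,
				   sizeof(struct mydma_private));
	if (dev == NULL)
		return -ENOMEM;

	dev->device = rte_dev;
	/* Driver-specific initialization would go here; once complete,
	 * the PMD marks the device ready for use. */
	dev->state = RTE_DMA_DEV_READY;
	return 0;
}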
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000000..56ea0332cb
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dma_count_avail;
+	rte_dma_dev_max;
+	rte_dma_get_dev_id;
+	rte_dma_is_valid;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dma_devices;
+	rte_dma_pmd_allocate;
+	rte_dma_pmd_release;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4323..3dd920f5c5 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -45,6 +45,7 @@ libraries = [
         'pdump',
         'rawdev',
         'regexdev',
+        'dmadev',
         'rib',
         'reorder',
         'sched',
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v22 2/5] dmadev: add control plane function support
  2021-09-16  3:41 ` [dpdk-dev] [PATCH v22 0/5] support dmadev Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 1/5] dmadev: introduce DMA device library Chengwen Feng
@ 2021-09-16  3:41   ` Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 3/5] dmadev: add data " Chengwen Feng
                     ` (2 subsequent siblings)
  4 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-16  3:41 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds control plane functions for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  41 +++
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 360 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 479 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  60 ++++
 lib/dmadev/version.map                 |   9 +
 6 files changed, 950 insertions(+)

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index c1c7579107..c724fefcb0 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -60,3 +60,44 @@ identifiers:
 
 - A device name used to designate the DMA device in console messages, for
   administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The rte_dma_configure API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dma_configure(int16_t dev_id,
+                         const struct rte_dma_conf *dev_conf);
+
+The ``rte_dma_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rte_dma_vchan_setup API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+                           const struct rte_dma_vchan_conf *conf);
+
+The ``rte_dma_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, and the source and
+destination device access port parameters.
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dma_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
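
Putting the control-plane calls documented above together, a minimal setup
sketch (not part of the patch; the descriptor count of 1024 is an arbitrary
choice that must fall within the [min_desc, max_desc] range reported by
rte_dma_info_get()):

#include <stdlib.h>

#include <rte_eal.h>
#include <rte_dmadev.h>

static void
setup_dmadev(int16_t dev_id)
{
	/* One memory-to-memory virtual channel. */
	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
	struct rte_dma_vchan_conf vchan_conf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,
	};

	if (rte_dma_configure(dev_id, &dev_conf) < 0)
		rte_exit(EXIT_FAILURE, "cannot configure dmadev %d\n", dev_id);
	if (rte_dma_vchan_setup(dev_id, 0, &vchan_conf) < 0)
		rte_exit(EXIT_FAILURE, "cannot set up vchan 0\n");
	if (rte_dma_start(dev_id) < 0)
		rte_exit(EXIT_FAILURE, "cannot start dmadev %d\n", dev_id);
}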
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 5a85198e0d..eb0315aaca 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -90,6 +90,7 @@ New Features
 * **Introduced dmadev library with:**
 
   * Device allocation and its multi-process support.
+  * Control plane functions.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 12d8302f15..544937acf8 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -317,6 +317,9 @@ rte_dma_pmd_release(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
+	if (dev->state == RTE_DMA_DEV_READY)
+		return rte_dma_close(dev->data->dev_id);
+
 	dma_release(dev);
 	return 0;
 }
@@ -356,3 +359,360 @@ rte_dma_count_avail(void)
 
 	return count;
 }
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dma_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dma_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->numa_node = dev->device->numa_node;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > dev_info.max_vchans) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+		RTE_DMA_LOG(ERR, "Device %d don't support silent", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dma_conf));
+	if (ret == 0)
+		memcpy(&dev->data->dev_conf, dev_conf,
+		       sizeof(struct rte_dma_conf));
+
+	return ret;
+}
+
+int
+rte_dma_start(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->data->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->data->dev_started == 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->data->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->data->dev_started == 1) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_close)(dev);
+	if (ret == 0)
+		dma_release(dev);
+
+	return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->data->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev->data->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= dev_info.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %d vchan out range!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < dev_info.min_desc ||
+	    conf->nb_desc > dev_info.max_desc) {
+		RTE_DMA_LOG(ERR,
+			"Device %d number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dma_vchan_conf));
+}
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dma_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMA_CAPA_SVA,         "sva"     },
+		{ RTE_DMA_CAPA_SILENT,      "silent"  },
+		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		fprintf(f, " %s", dma_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+
+	fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
+	dma_dump_capability(f, dev_info.dev_capa);
+	fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
+	fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
+	fprintf(f, "  silent_mode: %s\n",
+		dev->data->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
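
To illustrate the query and dump functions implemented above, a minimal
capability-check sketch (not part of the patch; assumes dev_id refers to a
valid device):

#include <stdio.h>
#include <stdlib.h>

#include <rte_eal.h>
#include <rte_dmadev.h>

static void
check_caps(int16_t dev_id)
{
	struct rte_dma_info info;

	if (rte_dma_info_get(dev_id, &info) != 0)
		rte_exit(EXIT_FAILURE, "cannot get info for dmadev %d\n",
			 dev_id);

	/* Refuse devices that cannot do memory-to-memory copies. */
	if ((info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM) == 0)
		rte_exit(EXIT_FAILURE, "dmadev %d: no mem2mem support\n",
			 dev_id);

	printf("dmadev %d: max %u vchans, %u-%u descriptors per vchan\n",
	       dev_id, (unsigned int)info.max_vchans,
	       (unsigned int)info.min_desc, (unsigned int)info.max_desc);

	/* Dump the full device state for debugging. */
	rte_dma_dump(dev_id, stdout);
}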
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 6074bae25d..a66144488a 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -53,6 +53,28 @@
  * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
  * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
  *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dma_configure()
+ *     - rte_dma_vchan_setup()
+ *     - rte_dma_start()
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
+ * rte_dma_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dma_start() again. The dataplane functions should not
+ * be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the rte_dma_close()
+ * function.
+ *
+ * Regarding MT-safety: all the functions of the dmadev API exported by a PMD
+ * are lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
  */
 
 #include <stdint.h>
@@ -124,6 +146,463 @@ bool rte_dma_is_valid(int16_t dev_id);
 __rte_experimental
 uint16_t rte_dma_count_avail(void);
 
+/** DMA device support memory-to-memory transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
+/** DMA device support memory-to-device transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
+/** DMA device support device-to-memory transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
+/** DMA device support device-to-device transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
+/** DMA device support SVA which could use VA as DMA address.
+ * If device support SVA then application could pass any VA address like memory
+ * from rte_malloc(), rte_memzone(), malloc, stack memory.
+ * If device don't support SVA, then application should pass IOVA address which
+ * from rte_malloc(), rte_memzone().
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
+/** DMA device support work in silent mode.
+ * In this mode, application don't required to invoke rte_dma_completed*()
+ * API.
+ *
+ * @see struct rte_dma_conf::silent_mode
+ */
+#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
+/** DMA device supports copy ops.
+ * The ops capabilities start at bit index 32, leaving a gap between the
+ * normal capabilities and the ops capabilities.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
+/** DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
+/** DMA device supports fill ops.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ *
+ * @see rte_dma_info_get
+ */
+struct rte_dma_info {
+	/** Device capabilities (RTE_DMA_CAPA_*). */
+	uint64_t dev_capa;
+	/** Maximum number of virtual DMA channels supported. */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/** Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support the COPY_SG capability, this value
+	 * can be zero.
+	 * If the device supports the COPY_SG capability, the rte_dma_copy_sg()
+	 * parameters nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t max_sges;
+	/** NUMA node connection, -1 if unknown. */
+	int16_t numa_node;
+	/** Number of virtual DMA channels configured. */
+	uint16_t nb_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dma_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
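
As an illustration of how an application typically consumes this call, here
is a minimal capability-probe sketch (assumptions: a valid dev_id, <stdio.h>
included, and an application that only needs plain memory-to-memory copies):

	static int
	check_dev(int16_t dev_id)
	{
		struct rte_dma_info info;

		if (rte_dma_info_get(dev_id, &info) != 0)
			return -1;
		/* Require plain memory-to-memory copy support. */
		if (!(info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM) ||
		    !(info.dev_capa & RTE_DMA_CAPA_OPS_COPY))
			return -1;
		printf("vchans: %u, desc range: [%u, %u]\n",
		       info.max_vchans, info.min_desc, info.max_desc);
		return 0;
	}
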
+
+/**
+ * A structure used to configure a DMA device.
+ *
+ * @see rte_dma_configure
+ */
+struct rte_dma_conf {
+	/** The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the 'max_vchans' field of struct
+	 * rte_dma_info obtained from rte_dma_info_get().
+	 */
+	uint16_t nb_vchans;
+	/** Indicates whether to enable silent mode.
+	 * false - default mode, true - silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMA_CAPA_SILENT
+	 */
+	bool enable_silent;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dma_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_start(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dma_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stop(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_close(int16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+enum rte_dma_direction {
+	/** DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/** DMA transfer direction - from memory to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (i.e. SoC memory) to device (i.e. host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/** DMA transfer direction - from device to memory.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from device
+	 * (i.e. host memory) to memory (i.e. SoC memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/** DMA transfer direction - from device to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (i.e. host memory) to another device (i.e. another host's
+	 * memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+};
+
+/**
+ * DMA access port type defines.
+ *
+ * @see struct rte_dma_port_param::port_type
+ */
+enum rte_dma_port_type {
+	RTE_DMA_PORT_NONE,
+	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dma_vchan_conf::src_port
+ * @see struct rte_dma_vchan_conf::dst_port
+ */
+struct rte_dma_port_param {
+	/** The device access port type.
+	 *
+	 * @see enum rte_dma_port_type
+	 */
+	enum rte_dma_port_type port_type;
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA engine needs to access the memory of host A, the
+		 * access can be initiated by PF-1 on Core0, or by VF-0 of
+		 * PF-0 on Core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			/** The PASID field in the TLP packet. */
+			uint64_t pasid : 20;
+			/** The attributes field in the TLP packet. */
+			uint64_t attr : 3;
+			/** The processing hint field in the TLP packet. */
+			uint64_t ph : 2;
+			/** The steering tag field in the TLP packet. */
+			uint64_t st : 16;
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
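
To make the PCIe bit-fields concrete, a hypothetical sketch filling a port
parameter for host access via PF-1 of PCIe Core0 from the diagram above
(all id values are illustrative only, not taken from real hardware):

	struct rte_dma_port_param port = {
		.port_type = RTE_DMA_PORT_PCIE,
		.pcie = {
			.coreid = 0,	/* PCIe Core0 in the diagram */
			.pfid = 1,	/* PF-1 */
			.vfen = 0,	/* access initiated from the PF, no VF */
		},
	};
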
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ *
+ * @see rte_dma_vchan_setup
+ */
+struct rte_dma_vchan_conf {
+	/** Transfer direction
+	 *
+	 * @see enum rte_dma_direction
+	 */
+	enum rte_dma_direction direction;
+	/** Number of descriptors for the virtual DMA channel. */
+	uint16_t nb_desc;
+	/** 1) Used to describe the device access port parameters in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameters in
+	 * the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param src_port;
+	/** 1) Used to describe the device access port parameters in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameters
+	 * in the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param dst_port;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dma_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+			const struct rte_dma_vchan_conf *conf);
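
Putting the control-plane calls together, a minimal setup sketch for device 0
(assumptions: one memory-to-memory vchan and 1024 descriptors, which must lie
within the device's reported min_desc/max_desc range):

	static int
	setup_dev0(void)
	{
		struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
		struct rte_dma_vchan_conf qconf = {
			.direction = RTE_DMA_DIR_MEM_TO_MEM,
			.nb_desc = 1024,
		};

		if (rte_dma_configure(0, &dev_conf) != 0)
			return -1;
		if (rte_dma_vchan_setup(0, 0, &qconf) != 0)
			return -1;
		return rte_dma_start(0);
	}
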
+
+/**
+ * A structure used to retrieve statistics.
+ *
+ * @see rte_dma_stats_get
+ */
+struct rte_dma_stats {
+	/** Count of operations which were submitted to hardware. */
+	uint64_t submitted;
+	/** Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t completed;
+	/** Count of operations which failed to complete. */
+	uint64_t errors;
+};
+
+/**
+ * Special ID, which is used to represent all virtual DMA channels.
+ *
+ * @see rte_dma_stats_get
+ * @see rte_dma_stats_reset
+ */
+#define RTE_DMA_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMA_ALL_VCHAN, all channels are covered.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dma_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
+		      struct rte_dma_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If set to RTE_DMA_ALL_VCHAN, all channels are covered.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
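
A short usage sketch for the statistics calls (assumptions: a valid dev_id
and <inttypes.h>/<stdio.h> included; RTE_DMA_ALL_VCHAN aggregates over every
configured vchan):

	struct rte_dma_stats stats;

	if (rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &stats) == 0)
		printf("submitted: %" PRIu64 ", completed: %" PRIu64
		       ", errors: %" PRIu64 "\n",
		       stats.submitted, stats.completed, stats.errors);
	/* Start the next measurement interval from zero. */
	rte_dma_stats_reset(dev_id, RTE_DMA_ALL_VCHAN);
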
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dump(int16_t dev_id, FILE *f);
 #include "rte_dmadev_core.h"
 
 #ifdef __cplusplus
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 1ce2cf0bf1..2436621594 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -20,6 +20,43 @@
 
 #include <rte_dev.h>
 
+struct rte_dma_dev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
+				  struct rte_dma_info *dev_info,
+				  uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev,
+				   const struct rte_dma_conf *dev_conf,
+				   uint32_t conf_sz);
+
+/** @internal Used to start a configured device. */
+typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				const struct rte_dma_vchan_conf *conf,
+				uint32_t conf_sz);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev,
+			uint16_t vchan, struct rte_dma_stats *stats,
+			uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
+
 /**
  * Possible states of a DMA device.
  *
@@ -34,6 +71,26 @@ enum rte_dma_dev_state {
 
 };
 
+/**
+ * DMA device operations function pointer table.
+ *
+ * @see struct rte_dma_dev:dev_ops
+ */
+struct rte_dma_dev_ops {
+	rte_dma_info_get_t       dev_info_get;
+	rte_dma_configure_t      dev_configure;
+	rte_dma_start_t          dev_start;
+	rte_dma_stop_t           dev_stop;
+	rte_dma_close_t          dev_close;
+
+	rte_dma_vchan_setup_t    vchan_setup;
+
+	rte_dma_stats_get_t      stats_get;
+	rte_dma_stats_reset_t    stats_reset;
+
+	rte_dma_dump_t           dev_dump;
+};
+
 /**
  * @internal
  * The data part, with no function pointers, associated with each DMA device.
@@ -53,6 +110,7 @@ struct rte_dma_dev_data {
 	 * dev_private information.
 	 */
 	void *dev_private;
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields */
 } __rte_cache_aligned;
@@ -72,6 +130,8 @@ struct rte_dma_dev_data {
 struct rte_dma_dev {
 	void *dev_private; /**< PMD-specific private data. */
 	struct rte_dma_dev_data *data; /**< Pointer to device data. */
+	/** Functions exported by PMD. */
+	const struct rte_dma_dev_ops *dev_ops;
 	/** Device info which supplied during device initialization. */
 	struct rte_device *device;
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 56ea0332cb..6b7939b10f 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -1,10 +1,19 @@
 EXPERIMENTAL {
 	global:
 
+	rte_dma_close;
+	rte_dma_configure;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
+	rte_dma_dump;
 	rte_dma_get_dev_id;
+	rte_dma_info_get;
 	rte_dma_is_valid;
+	rte_dma_start;
+	rte_dma_stats_get;
+	rte_dma_stats_reset;
+	rte_dma_stop;
+	rte_dma_vchan_setup;
 
 	local: *;
 };
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v22 3/5] dmadev: add data plane function support
  2021-09-16  3:41 ` [dpdk-dev] [PATCH v22 0/5] support dmadev Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 1/5] dmadev: introduce DMA device library Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 2/5] dmadev: add control plane function support Chengwen Feng
@ 2021-09-16  3:41   ` Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 4/5] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 5/5] app/test: add dmadev API test Chengwen Feng
  4 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-16  3:41 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch add data plane functions for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  22 ++
 doc/guides/rel_notes/release_21_11.rst |   2 +-
 lib/dmadev/rte_dmadev.h                | 461 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  41 +++
 lib/dmadev/version.map                 |   6 +
 5 files changed, 531 insertions(+), 1 deletion(-)

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index c724fefcb0..44eea23202 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -101,3 +101,25 @@ can be used to get the device info and supported features.
 
 Silent mode is a special device capability which does not require the
 application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dma_copy`` and ``rte_dma_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dma_submit`` API is used to issue the doorbell to hardware.
+Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs, ``rte_dma_completed`` and
+``rte_dma_completed_status``, which are used to obtain the results of the
+enqueue requests. ``rte_dma_completed`` returns the number of successfully
+completed operations. ``rte_dma_completed_status`` returns the number of
+completed operations along with the status of each operation (filled into the
+``status`` array passed by the user). These two APIs can also return the last
+completed operation's ``ring_idx``, which helps the user track operations
+within their own application-defined rings.
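
To illustrate this ``ring_idx`` tracking, a sketch of an application-defined
metadata ring (dev_id, vchan, the IOVAs, len and job_ctx/job_done are assumed
to be provided by the application; the array size matches the uint16_t
wrap-around of ring_idx):

	static void *meta[UINT16_MAX + 1]; /* one slot per possible ring_idx */
	uint16_t last_idx;
	bool has_error;
	int idx;

	idx = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
			   RTE_DMA_OP_FLAG_SUBMIT);
	if (idx >= 0)
		meta[idx] = job_ctx;	/* stash per-operation context */

	if (rte_dma_completed(dev_id, vchan, 1, &last_idx, &has_error) == 1)
		job_done(meta[last_idx]);	/* hypothetical handler */
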
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index eb0315aaca..a71853b9c3 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -90,7 +90,7 @@ New Features
 * **Introduced dmadev library with:**
 
   * Device allocation and it's multi-process support.
-  * Control plane functions.
+  * Control and data plane functions.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index a66144488a..be54f2cb9d 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -59,6 +59,8 @@
  *     - rte_dma_vchan_setup()
  *     - rte_dma_start()
  *
+ * Then, the application can invoke dataplane functions to process jobs.
+ *
  * If the application wants to change the configuration (i.e. invoke
  * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
  * rte_dma_stop() first to stop the device and then do the reconfiguration
@@ -68,6 +70,77 @@
  * Finally, an application can close a dmadev by invoking the rte_dma_close()
  * function.
  *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dma_copy()
+ *     - rte_dma_copy_sg()
+ *     - rte_dma_fill()
+ *     - rte_dma_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel; if the submission is successful, a non-negative
+ * ring_idx <= UINT16_MAX is returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue the doorbell to hardware; alternatively, the
+ * flags parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT) can
+ * do the same work.
+ * @note When enqueuing a set of jobs to the device, having a separate submit
+ * outside a loop makes for clearer code than having a check for the last
+ * iteration inside the loop to set a special submit flag.  However, for cases
+ * where one item alone is to be submitted or there is a small set of jobs to
+ * be submitted sequentially, having a submit flag provides a lower-overhead
+ * way of doing the submission while still keeping the code clean.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dma_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dma_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT),
+ * the application does not invoke the above two completed APIs.
+ *
+ * About the ring_idx which enqueue APIs (e.g. rte_dma_copy(), rte_dma_fill())
+ * return, the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero, after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx return is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx return is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dma_copy(),
+ * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type.
+ *
+ * The dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory address
+ * can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
 * Regarding MT-safety: all dmadev API functions exported by a PMD are
 * lock-free and are assumed not to be invoked in parallel on different
 * logical cores against the same target dmadev object.
@@ -603,8 +676,396 @@ int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
  */
 __rte_experimental
 int rte_dma_dump(int16_t dev_id, FILE *f);
+
+/**
+ * DMA transfer result status code defines.
+ *
+ * @see rte_dma_completed_status
+ */
+enum rte_dma_status_code {
+	/** The operation completed successfully. */
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/** The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user can modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job), which allows outstanding requests to complete as much as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_USER_ABORT,
+	/** The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed nonetheless, so report the status of the not-attempted
+	 * jobs before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/** The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/** The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/** The operation failed to complete due to an invalid source or
+	 * destination address; covers the case where only an address error
+	 * is known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/** The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/** The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/** The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/** The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/** The operation failed to complete due to a bus error; covers the
+	 * case where only a bus error is known, but not its direction.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/** The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/** The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/** The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device,
+	 * device-to-memory or device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/** The operation failed to complete due to a page fault during
+	 * address lookup.
+	 */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/** The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+};
+
+/**
+ * A structure used to hold scatter-gather DMA operation request entry.
+ *
+ * @see rte_dma_copy_sg
+ */
+struct rte_dma_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
 #include "rte_dmadev_core.h"
 
+/** DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in-order (i.e. it has an implicit fence
+ * between operations), this flag could be a NOP.
+ *
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+#define RTE_DMA_OP_FLAG_FENCE	RTE_BIT64(0)
+/** DMA submit flag.
+ * It means the enqueue carrying this flag also issues the doorbell to
+ * hardware for the enqueued jobs.
+ *
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+#define RTE_DMA_OP_FLAG_SUBMIT	RTE_BIT64(1)
+/** Hint to write DMA data to the low-level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so a driver should not return an error if this
+ * flag is set.
+ *
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+#define RTE_DMA_OP_FLAG_LLC	RTE_BIT64(2)
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is triggered to begin this operation; otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   Pointer to the source scatter-gather entry array.
+ * @param dst
+ *   Pointer to the destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
+		struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		uint64_t flags)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
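
A sketch of building the scatter-gather arrays for this call (assumptions:
the IOVAs are prepared by the caller, the entry counts stay within
rte_dma_info::max_sges, and the destination length matches the sum of the
source lengths):

	struct rte_dma_sge src[2] = {
		{ .addr = src_iova0, .length = 64 },
		{ .addr = src_iova1, .length = 128 },
	};
	struct rte_dma_sge dst[1] = {
		{ .addr = dst_iova, .length = 192 },
	};
	int ret;

	ret = rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 1,
			      RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0)
		return ret;	/* e.g. -ENOSPC when the ring is full */
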
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
+	     rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dma_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+static inline int
+rte_dma_submit(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
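
The batching pattern recommended in the notes above, sketched as a burst of
copies followed by a single doorbell (src_iova[], dst_iova[], len and n are
assumed to be prepared by the caller):

	uint32_t i;

	for (i = 0; i < n; i++) {
		if (rte_dma_copy(dev_id, vchan, src_iova[i], dst_iova[i],
				 len, 0) < 0)
			break;	/* ring full: submit what is enqueued */
	}
	rte_dma_submit(dev_id, vchan);	/* one doorbell for the batch */
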
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
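
A typical polling sketch for this call (BURST is an assumed application
constant, and a hypothetical bookkeeping step follows the reap):

	uint16_t nb, last_idx;
	bool has_error = false;

	nb = rte_dma_completed(dev_id, vchan, BURST, &last_idx, &has_error);
	/* nb operations up to and including ring_idx last_idx succeeded */
	if (has_error) {
		/* use rte_dma_completed_status() to learn what failed */
	}
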
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; each completed
+ * operation may have succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of the status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (assuming n), then n values in the
+ *   status array are also set.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
+			 const uint16_t nb_cpls, uint16_t *last_idx,
+			 enum rte_dma_status_code *status)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
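
A sketch of draining completions together with their status codes, counting
failures (BURST again an assumed application constant):

	enum rte_dma_status_code status[BURST];
	uint16_t nb, last_idx, i, errors = 0;

	nb = rte_dma_completed_status(dev_id, vchan, BURST, &last_idx,
				      status);
	for (i = 0; i < nb; i++) {
		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
			errors++;	/* status[i] gives the exact cause */
	}
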
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 2436621594..edb3286cbb 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -57,6 +57,36 @@ typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
 /** @internal Used to dump internal information. */
 typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
 
+/** @internal Used to enqueue a copy operation. */
+typedef int (*rte_dma_copy_t)(struct rte_dma_dev *dev, uint16_t vchan,
+			      rte_iova_t src, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to enqueue a scatter-gather list copy operation. */
+typedef int (*rte_dma_copy_sg_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				 const struct rte_dma_sge *src,
+				 const struct rte_dma_sge *dst,
+				 uint16_t nb_src, uint16_t nb_dst,
+				 uint64_t flags);
+
+/** @internal Used to enqueue a fill operation. */
+typedef int (*rte_dma_fill_t)(struct rte_dma_dev *dev, uint16_t vchan,
+			      uint64_t pattern, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to trigger hardware to begin working. */
+typedef int (*rte_dma_submit_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to return number of successful completed operations. */
+typedef uint16_t (*rte_dma_completed_t)(struct rte_dma_dev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+
+/** @internal Used to return number of completed operations. */
+typedef uint16_t (*rte_dma_completed_status_t)(struct rte_dma_dev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+
 /**
  * Possible states of a DMA device.
  *
@@ -129,6 +159,17 @@ struct rte_dma_dev_data {
  */
 struct rte_dma_dev {
 	void *dev_private; /**< PMD-specific private data. */
+	rte_dma_copy_t             copy;
+	rte_dma_copy_sg_t          copy_sg;
+	rte_dma_fill_t             fill;
+	rte_dma_submit_t           submit;
+	rte_dma_completed_t        completed;
+	rte_dma_completed_status_t completed_status;
+	void *reserved_cl0;
+	/** Reserve space for future IO functions, while keeping data and
+	 * dev_ops pointers on the second cacheline.
+	 */
+	void *reserved_cl1[6];
 	struct rte_dma_dev_data *data; /**< Pointer to device data. */
 	/** Functions exported by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 6b7939b10f..c780463bb2 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -2,10 +2,15 @@ EXPERIMENTAL {
 	global:
 
 	rte_dma_close;
+	rte_dma_completed;
+	rte_dma_completed_status;
 	rte_dma_configure;
+	rte_dma_copy;
+	rte_dma_copy_sg;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
 	rte_dma_dump;
+	rte_dma_fill;
 	rte_dma_get_dev_id;
 	rte_dma_info_get;
 	rte_dma_is_valid;
@@ -13,6 +18,7 @@ EXPERIMENTAL {
 	rte_dma_stats_get;
 	rte_dma_stats_reset;
 	rte_dma_stop;
+	rte_dma_submit;
 	rte_dma_vchan_setup;
 
 	local: *;
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v22 4/5] dma/skeleton: introduce skeleton dmadev driver
  2021-09-16  3:41 ` [dpdk-dev] [PATCH v22 0/5] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 3/5] dmadev: add data " Chengwen Feng
@ 2021-09-16  3:41   ` Chengwen Feng
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 5/5] app/test: add dmadev API test Chengwen Feng
  4 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-16  3:41 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

Skeleton dmadevice driver, on the lines of rawdev skeleton, is for
showcasing of the dmadev library.

Design of skeleton involves a virtual device which is plugged into VDEV
bus on initialization.

Also, enable compilation of dmadev skeleton drivers.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   1 +
 drivers/dma/meson.build                |   4 +-
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 571 +++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  61 +++
 drivers/dma/skeleton/version.map       |   3 +
 6 files changed, 646 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 8af9522a5a..418f775382 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -455,6 +455,7 @@ F: doc/guides/regexdevs/features/default.ini
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: drivers/dma/skeleton/
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
index a24c56d8ff..d9c7ede32f 100644
--- a/drivers/dma/meson.build
+++ b/drivers/dma/meson.build
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright 2021 HiSilicon Limited
 
-drivers = []
+drivers = [
+        'skeleton',
+]
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000000..8871b80956
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000000..876878bb78
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances, currently only 1 is supported. */
+static uint16_t skeldma_count;
+
+static int
+skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	32
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+			     RTE_DMA_CAPA_SVA |
+			     RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
+		  uint32_t conf_sz)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	RTE_SET_USED(conf_sz);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dma_dev *dev = param;
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			/* Saturate the counter at the threshold so a
+			 * uint32_t wrap-around cannot restart the
+			 * busy-poll phase.
+			 */
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, include:
+	 * 1) fflush pending/running/completed ring to empty ring.
+	 * 2) init ring idx to zero.
+	 * 3) init running statistics.
+	 * 4) mark cpucopy task exit_flag to false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Set thread affinity lcore = %d fail!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skeleton_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a full ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dma_dev *dev)
+{
+	/* The device is already stopped at this point. */
+	vchan_release(dev->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf,
+		    uint32_t conf_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(conf_sz);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of desc must be power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
+		  struct rte_dma_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->dev_private;
+
+	fprintf(f,
+		"    lcore_id: %d\n"
+		"    socket_id: %d\n"
+		"    desc_empty_ring_count: %u\n"
+		"    desc_pending_ring_count: %u\n"
+		"    desc_running_ring_count: %u\n"
+		"    desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	fprintf(f,
+		"    next_ring_idx: %u\n"
+		"    submitted_count: %" PRIu64 "\n"
+		"    completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(struct rte_dma_dev *dev, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(flags);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(struct rte_dma_dev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(struct rte_dma_dev *dev,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(struct rte_dma_dev *dev,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dma_dev_ops skeldma_ops = {
+	.dev_info_get  = skeldma_info_get,
+	.dev_configure = skeldma_configure,
+	.dev_start     = skeldma_start,
+	.dev_stop      = skeldma_stop,
+	.dev_close     = skeldma_close,
+
+	.vchan_setup   = skeldma_vchan_setup,
+
+	.stats_get     = skeldma_stats_get,
+	.stats_reset   = skeldma_stats_reset,
+
+	.dev_dump      = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dma_dev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	dev->copy = skeldma_copy;
+	dev->submit = skeldma_submit;
+	dev->completed = skeldma_completed;
+	dev->completed_status = skeldma_completed_status;
+	dev->dev_ops = &skeldma_ops;
+	dev->device = &vdev->device;
+
+	hw = dev->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	dev->state = RTE_DMA_DEV_READY;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	return rte_dma_pmd_release(name);
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multiple process not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_count > 0) {
+		SKELDMA_LOG(ERR, "Multiple instance not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		skeldma_count = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_count = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000000..eaa52364bf
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef SKELETON_DMADEV_H
+#define SKELETON_DMADEV_H
+
+#include <pthread.h>
+
+#include <rte_ring.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* SKELETON_DMADEV_H */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000000..c2e0723b4c
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v22 5/5] app/test: add dmadev API test
  2021-09-16  3:41 ` [dpdk-dev] [PATCH v22 0/5] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 4/5] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-09-16  3:41   ` Chengwen Feng
  4 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-16  3:41 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the
test framework.
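
For example (assuming a default meson build; the binary path may differ):

  $ ./build/app/test/dpdk-test
  RTE>> dmadev_autotest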

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                |   1 +
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  43 +++
 app/test/test_dmadev_api.c | 539 +++++++++++++++++++++++++++++++++++++
 4 files changed, 587 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 418f775382..9966797d12 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -456,6 +456,7 @@ DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
 F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/app/test/meson.build b/app/test/meson.build
index a7611686ad..9027eba3a4 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -43,6 +43,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -162,6 +164,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -333,6 +336,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000000..e765ec5f2c
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+
+/* from test_dmadev_api.c */
+extern int test_dma_api(uint16_t dev_id);
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dma_get_dev_id(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dma_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dma(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000000..4f7024e1cb
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,539 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dma_api(uint16_t dev_id);
+
+#define DMA_TEST_API_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static int16_t test_dev_id;
+static int16_t invalid_dev_id;
+
+static char *src;
+static char *dst;
+
+static int total;
+static int passed;
+static int failed;
+
+static int
+testsuite_setup(int16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = -1;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL) {
+		rte_free(src);
+		src = NULL;
+		return -ENOMEM;
+	}
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	/* Set dmadev log level to critical to suppress unnecessary output
+	 * during API tests.
+	 */
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	src = NULL;
+	rte_free(dst);
+	dst = NULL;
+	/* Ensure the dmadev is stopped. */
+	rte_dma_stop(test_dev_id);
+
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dma_get_dev_id(void)
+{
+	int ret = rte_dma_get_dev_id("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dma_is_valid(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dma_is_valid(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_count(void)
+{
+	uint16_t count = rte_dma_count_avail();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_info_get(void)
+{
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_configure(void)
+{
+	struct rte_dma_conf conf = { 0 };
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configured nb_vchans does not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_vchan_setup(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure the configure step succeeds */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_vchan_setup(test_dev_id, dev_conf.nb_vchans, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.src_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_start_stop(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup while the device is started */
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret);
+
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_stats(void)
+{
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dma_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, RTE_DMA_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, RTE_DMA_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+	ret = rte_dma_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret, i;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check enqueue without submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+	/* Check that an explicit submit triggers completion */
+	ret = rte_dma_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Setup test memory */
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] != dst[i]) {
+			RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+				"Failed to copy memory, %d %d", src[i], dst[i]);
+			break;
+		}
+	}
+
+	/* Stop dmadev to return it to a known state */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Unexpected completed status, %d", status[i]);
+
+	/* Check completed status again (no new completions expected) */
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to completed status");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Unexpected completed status, %d", status[i]);
+
+	/* Stop dmadev to return it to a known state */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dma_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup fail!\n");
+		return -1;
+	}
+
+	/* If a testcase exits successfully, it must ensure that the test
+	 * dmadev still exists and is in the stopped state.
+	 */
+	DMA_TEST_API_RUN(test_dma_get_dev_id);
+	DMA_TEST_API_RUN(test_dma_is_valid_dev);
+	DMA_TEST_API_RUN(test_dma_count);
+	DMA_TEST_API_RUN(test_dma_info_get);
+	DMA_TEST_API_RUN(test_dma_configure);
+	DMA_TEST_API_RUN(test_dma_vchan_setup);
+	DMA_TEST_API_RUN(test_dma_start_stop);
+	DMA_TEST_API_RUN(test_dma_stats);
+	DMA_TEST_API_RUN(test_dma_dump);
+	DMA_TEST_API_RUN(test_dma_completed);
+	DMA_TEST_API_RUN(test_dma_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs
  2021-09-09 10:33     ` Thomas Monjalon
  2021-09-09 11:18       ` Bruce Richardson
  2021-09-09 13:33       ` fengchengwen
@ 2021-09-16  3:57       ` fengchengwen
  2 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-09-16  3:57 UTC (permalink / raw)
  To: Thomas Monjalon, bruce.richardson
  Cc: ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev, mb,
	nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

Hi Thomas,

Most comments are fixed in v22; for the rest, please see the inline comments.

Thanks.

On 2021/9/9 18:33, Thomas Monjalon wrote:
> Hi,
> 
> I am having a surface look at the API.
> I hope we can do better than previous libs.
> 
> 07/09/2021 14:56, Chengwen Feng:
>> --- a/MAINTAINERS
>> +++ b/MAINTAINERS
>> @@ -496,6 +496,10 @@ F: drivers/raw/skeleton/
>>  F: app/test/test_rawdev.c
>>  F: doc/guides/prog_guide/rawdev.rst
>>  
>> +DMA device API - EXPERIMENTAL
>> +M: Chengwen Feng <fengchengwen@huawei.com>
>> +F: lib/dmadev/

[snip]

>> +
>> +/* Enumerates DMA device capabilities. */
> 
> You should group them with a doxygen group syntax.
> See https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/

Because each RTE_DMADEV_CAPA_* flag carries multiple lines of comments, the
result does not render well when using the group syntax.

I also considered defining them as an enum, but the values are uint64_t,
while enumerators are generally of int type.

So it stays the same here.
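
For reference, the group syntax being discussed looks roughly like this
(sketch only):

    /** @name DMA device capabilities
     * @{
     */
    #define RTE_DMADEV_CAPA_MEM_TO_MEM (1ull << 0)
    /**< DMA device support memory-to-memory transfer. */
    /** @} */

With several lines of documentation attached to each flag, the grouped
rendering becomes cluttered, which is why the per-define comments are kept.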

> 
>> +#define RTE_DMADEV_CAPA_MEM_TO_MEM	(1ull << 0)
> 
> Please use RTE_BIT macro (32 or 64).
> 
>> +/**< DMA device support memory-to-memory transfer.
>> + *
>> + * @see struct rte_dmadev_info::dev_capa
>> + */
> 

[snip]

> 
> This series add one file per patch.
> Instead it would be better to have groups of features per patch,
> meaning the implementation and the driver interface should be
> in the same patch.
> You can split like this:
> 	1/ device allocation
> 	2/ configuration and start/stop
> 	3/ dataplane functions
> 
> I would suggest 2 more patches:
> 	4/ event notification
> see https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-3-thomas@monjalon.net/
> 	5/ multi-process
> see https://patches.dpdk.org/project/dpdk/patch/20210730135533.417611-5-thomas@monjalon.net/
> 

The multi-process support requires many modifications to device allocation;
because the two are deeply coupled, I combined them into one patch.

> 
> Thanks for the work
> 
> 
> .
> 

^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v23 0/6] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (25 preceding siblings ...)
  2021-09-16  3:41 ` [dpdk-dev] [PATCH v22 0/5] support dmadev Chengwen Feng
@ 2021-09-24 10:53 ` Chengwen Feng
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library Chengwen Feng
                     ` (5 more replies)
  2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
                   ` (2 subsequent siblings)
  29 siblings, 6 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-24 10:53 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch set contains six patches adding the new dmadev library.

Chengwen Feng (6):
  dmadev: introduce DMA device library
  dmadev: add control plane function support
  dmadev: add data plane function support
  dmadev: add multi-process support
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test

---
v23:
* split multi-process support from 1st patch.
* fix some static check warnings.
* fix skeleton cpu thread zero_req_count flip bug.
* add test_dmadev_api.h.
* add a description of switching the dmadev state after successful init.
v22:
* function prefix change from rte_dmadev_* to rte_dma_*.
* change to prefix comment in most scenarios.
* dmadev dev_id use int16_t type.
* fix typo.
* organize patchsets in incremental mode.
v21:
* add comment for reserved fields of struct rte_dmadev.
v20:
* delete unnecessary and duplicate include header files.
* the conf_sz parameter is added to the configure and vchan-setup
  callbacks of the PMD, this is mainly used to enhance ABI
  compatibility.
* the rte_dmadev structure field is rearranged to reserve more space
  for I/O functions.
* fix some ambiguous and unnecessary comments.
* fix a potential memory leak in the UT.
* redefine skeldma_init_once to skeldma_count.
* suppress rte_dmadev error output when executing the UT.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   41 +
 app/test/test_dmadev_api.c             |  574 +++++++++++++
 app/test/test_dmadev_api.h             |    5 +
 config/rte_config.h                    |    3 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/dmadevs/index.rst           |   12 +
 doc/guides/index.rst                   |    1 +
 doc/guides/prog_guide/dmadev.rst       |  127 +++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    7 +
 drivers/dma/meson.build                |    6 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  570 +++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   61 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  728 ++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1074 ++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  181 ++++
 lib/dmadev/rte_dmadev_pmd.h            |   60 ++
 lib/dmadev/version.map                 |   35 +
 lib/meson.build                        |    1 +
 27 files changed, 3801 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 app/test/test_dmadev_api.h
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library
  2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
@ 2021-09-24 10:53   ` Chengwen Feng
  2021-10-04 21:12     ` Radha Mohan
  2021-10-06 10:26     ` Thomas Monjalon
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support Chengwen Feng
                     ` (4 subsequent siblings)
  5 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-24 10:53 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The 'dmadevice' is a generic type of DMA device.

This patch introduces the 'dmadevice' device allocation APIs.

The infrastructure is prepared to welcome drivers in drivers/dma/

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   5 +
 config/rte_config.h                    |   3 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/dmadevs/index.rst           |  12 ++
 doc/guides/index.rst                   |   1 +
 doc/guides/prog_guide/dmadev.rst       |  64 ++++++
 doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst        |   1 +
 doc/guides/rel_notes/release_21_11.rst |   4 +
 drivers/dma/meson.build                |   4 +
 drivers/meson.build                    |   1 +
 lib/dmadev/meson.build                 |   7 +
 lib/dmadev/rte_dmadev.c                | 263 +++++++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 134 ++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  51 +++++
 lib/dmadev/rte_dmadev_pmd.h            |  60 ++++++
 lib/dmadev/version.map                 |  20 ++
 lib/meson.build                        |   1 +
 19 files changed, 916 insertions(+)
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 77a549a5e8..a5b11ac70b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -454,6 +454,11 @@ F: app/test-regex/
 F: doc/guides/prog_guide/regexdev.rst
 F: doc/guides/regexdevs/features/default.ini
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 Eventdev API
 M: Jerin Jacob <jerinj@marvell.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
diff --git a/config/rte_config.h b/config/rte_config.h
index 590903c07d..6e397a62ab 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -70,6 +70,9 @@
 /* regexdev defines */
 #define RTE_MAX_REGEXDEV_DEVS 32
 
+/* dmadev defines */
+#define RTE_DMADEV_DEFAULT_MAX_DEVS 64
+
 /* eventdev defines */
 #define RTE_EVENT_MAX_DEVS 16
 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107a03..2939050431 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -21,6 +21,7 @@ The public API headers are grouped by topics:
   [compressdev]        (@ref rte_compressdev.h),
   [compress]           (@ref rte_comp.h),
   [regexdev]           (@ref rte_regexdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [eventdev]           (@ref rte_eventdev.h),
   [event_eth_rx_adapter]   (@ref rte_event_eth_rx_adapter.h),
   [event_eth_tx_adapter]   (@ref rte_event_eth_tx_adapter.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a0195c6..109ec1f682 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/doc/guides/dmadevs/index.rst b/doc/guides/dmadevs/index.rst
new file mode 100644
index 0000000000..0bce29d766
--- /dev/null
+++ b/doc/guides/dmadevs/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Drivers
+==================
+
+The following is a list of DMA device drivers, which can be used from
+an application through the DMA API.
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 857f0363d3..919825992e 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -21,6 +21,7 @@ DPDK documentation
    compressdevs/index
    vdpadevs/index
    regexdevs/index
+   dmadevs/index
    eventdevs/index
    rawdevs/index
    mempool/index
diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000000..822282213c
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,64 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining generic APIs which
+support a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA library follows the same basic principles as those used in DPDK's
+Ethernet Device framework and the RegEx framework. The DMA framework provides
+a generic DMA device framework which supports both physical (hardware)
+and virtual (software) DMA devices as well as a generic DMA API which allows
+DMA devices to be managed and configured and supports DMA operations to be
+provisioned on DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * A DMA controller may have multiple hardware DMA channels (aka. hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * A dmadev may create multiple virtual DMA channels, where each virtual DMA
+   channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. e.g. An application could
+   create virtual DMA channel 0 for memory-to-memory transfers and virtual
+   DMA channel 1 for memory-to-device transfers.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered, based on their PCI BDF (bus/bridge,
+device, function), during the PCI probe/enumeration performed by the EAL at
+DPDK initialization. Specific physical DMA controllers, like other physical
+devices in DPDK, can be selected using the EAL command line options.
+
+dmadevs are dynamically allocated using the ``rte_dma_pmd_allocate`` API,
+one per hardware DMA channel. After a dmadev is initialized successfully,
+the driver needs to switch the dmadev state to ``RTE_DMA_DEV_READY``.
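+
+Below is a minimal allocation sketch as it might appear in a driver probe
+function (illustrative only: error handling is trimmed, ``name`` and
+``numa_node`` come from the probe context, and ``pmd_private`` is a
+placeholder for the driver's private data structure):
+
+.. code-block:: c
+
+   struct rte_dma_dev *dev;
+
+   dev = rte_dma_pmd_allocate(name, numa_node, sizeof(struct pmd_private));
+   if (dev == NULL)
+       return -EINVAL;
+   /* ... fill in dev_ops and dataplane function pointers ... */
+   dev->state = RTE_DMA_DEV_READY;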
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers (see the example below):
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
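+
+For example, an application can map a device name to its index with
+``rte_dma_get_dev_id()`` and validate an index with ``rte_dma_is_valid()``.
+A short sketch, using the skeleton driver's name from this series:
+
+.. code-block:: c
+
+   int dev_id = rte_dma_get_dev_id("dma_skeleton");
+
+   if (dev_id < 0 || !rte_dma_is_valid(dev_id))
+       return -1; /* no such device */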
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000000..157d7eb7dc
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507f46..89af28dacb 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -27,6 +27,7 @@ Programmer's Guide
     cryptodev_lib
     compressdev
     regexdev
+    dmadev
     rte_security
     rawdev
     link_bonding_poll_mode_drv_lib
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 19356ac53c..74639f1e81 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -106,6 +106,10 @@ New Features
   Added command-line options to specify total number of processes and
   current process ID. Each process owns subset of Rx and Tx queues.
 
+* **Introduced dmadev library with:**
+
+  * Device allocation APIs.
+
 
 Removed Items
 -------------
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000000..a24c56d8ff
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2021 HiSilicon Limited
+
+drivers = []
diff --git a/drivers/meson.build b/drivers/meson.build
index 3d08540581..b7d680868a 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000000..d2fc85e8c7
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000000..96af3f0772
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+struct rte_dma_dev *rte_dma_devices;
+static int16_t dma_devices_max;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
+#define RTE_DMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, rte_dma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Macros to check for valid device id */
+#define RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
+	if (!rte_dma_is_valid(dev_id)) { \
+		RTE_DMA_LOG(ERR, "Invalid dev_id=%d", dev_id); \
+		return retval; \
+	} \
+} while (0)
+
+int
+rte_dma_dev_max(size_t dev_max)
+{
+	/* This function may be called before rte_eal_init(), so no rte library
+	 * function can be called in this function.
+	 */
+	if (dev_max == 0 || dev_max > INT16_MAX)
+		return -EINVAL;
+
+	if (dma_devices_max > 0)
+		return -EINVAL;
+
+	dma_devices_max = dev_max;
+
+	return 0;
+}
+
+static int
+dma_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMA_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMA_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
+		RTE_DMA_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int16_t
+dma_find_free_dev(void)
+{
+	int16_t i;
+
+	if (rte_dma_devices == NULL)
+		return -1;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].dev_name[0] == '\0')
+			return i;
+	}
+
+	return -1;
+}
+
+static struct rte_dma_dev *
+dma_find(const char *name)
+{
+	int16_t i;
+
+	if (rte_dma_devices == NULL)
+		return NULL;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
+		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+			return &rte_dma_devices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dma_process_data_prepare(void)
+{
+	size_t size;
+	void *ptr;
+
+	if (rte_dma_devices != NULL)
+		return 0;
+
+	/* The return value of malloc may not be aligned to the cache line.
+	 * Therefore, extra memory is allocated so that the pointer can be
+	 * realigned.
+	 * Note: we do not call posix_memalign/aligned_alloc because their
+	 * availability depends on the libc version.
+	 */
+	size = dma_devices_max * sizeof(struct rte_dma_dev) +
+		RTE_CACHE_LINE_SIZE;
+	ptr = malloc(size);
+	if (ptr == NULL)
+		return -ENOMEM;
+	memset(ptr, 0, size);
+
+	rte_dma_devices = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
+
+	return 0;
+}
+
+static int
+dma_data_prepare(void)
+{
+	if (dma_devices_max == 0)
+		dma_devices_max = RTE_DMADEV_DEFAULT_MAX_DEVS;
+	return dma_process_data_prepare();
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+	void *dev_private;
+	int16_t dev_id;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	dev = dma_find(name);
+	if (dev != NULL) {
+		RTE_DMA_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	dev_private = rte_zmalloc_socket(name, private_data_size,
+					 RTE_CACHE_LINE_SIZE, numa_node);
+	if (dev_private == NULL) {
+		RTE_DMA_LOG(ERR, "Cannot allocate private data");
+		return NULL;
+	}
+
+	dev_id = dma_find_free_dev();
+	if (dev_id < 0) {
+		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
+		rte_free(dev_private);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[dev_id];
+	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
+	dev->dev_id = dev_id;
+	dev->numa_node = numa_node;
+	dev->dev_private = dev_private;
+
+	return dev;
+}
+
+static void
+dma_release(struct rte_dma_dev *dev)
+{
+	rte_free(dev->dev_private);
+	memset(dev, 0, sizeof(struct rte_dma_dev));
+}
+
+struct rte_dma_dev *
+rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0 || private_data_size == 0)
+		return NULL;
+
+	dev = dma_allocate(name, numa_node, private_data_size);
+	if (dev == NULL)
+		return NULL;
+
+	dev->state = RTE_DMA_DEV_REGISTERED;
+
+	return dev;
+}
+
+int
+rte_dma_pmd_release(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	dma_release(dev);
+	return 0;
+}
+
+int
+rte_dma_get_dev_id(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	return dev->dev_id;
+}
+
+bool
+rte_dma_is_valid(int16_t dev_id)
+{
+	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
+		rte_dma_devices != NULL &&
+		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
+}
+
+uint16_t
+rte_dma_count_avail(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	if (rte_dma_devices == NULL)
+		return count;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
+			count++;
+	}
+
+	return count;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000000..17dc0d1226
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021 Marvell International Ltd
+ * Copyright(c) 2021 SmartShare Systems
+ */
+
+#ifndef RTE_DMADEV_H
+#define RTE_DMADEV_H
+
+/**
+ * @file rte_dmadev.h
+ *
+ * DMA (Direct Memory Access) device API.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW DMA channel |               | HW DMA channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW DMA Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (a.k.a.
+ * HW-DMA-queues), and each HW-DMA-channel should be represented by a dmadev.
+ *
+ * A dmadev can create multiple virtual DMA channels, where each virtual DMA
+ * channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. e.g. an application could
+ * create virtual DMA channel 0 for the memory-to-memory transfer scenario,
+ * and virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * A dmadev is dynamically allocated by rte_dma_pmd_allocate() during the
+ * PCI/SoC device probing phase performed at EAL initialization time, and can
+ * be released by rte_dma_pmd_release() during the PCI/SoC device removal
+ * phase.
+ *
+ * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ */
+
+#include <stdint.h>
+
+#include <rte_bitops.h>
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure the maximum number of dmadevs.
+ * @note This function can be invoked before rte_eal_init() in the primary
+ * process to change the maximum number of dmadevs.
+ *
+ * @param dev_max
+ *   maximum number of dmadevs.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dev_max(size_t dev_max);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int rte_dma_get_dev_id(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the given device identifier refers to a valid DMA device.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   true if the device index is valid, false otherwise.
+ */
+__rte_experimental
+bool rte_dma_is_valid(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t rte_dma_count_avail(void);
+
+#include "rte_dmadev_core.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_H */
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000000..5ed96853b2
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef RTE_DMADEV_CORE_H
+#define RTE_DMADEV_CORE_H
+
+/**
+ * @file
+ *
+ * DMA Device internal header.
+ *
+ * This header contains internal data types that are used by the DMA devices
+ * in order to expose their ops to the class.
+ *
+ * Applications should not use these APIs directly.
+ *
+ */
+
+/**
+ * Possible states of a DMA device.
+ *
+ * @see struct rte_dmadev::state
+ */
+enum rte_dma_dev_state {
+	RTE_DMA_DEV_UNUSED = 0, /**< Device is unused. */
+	/** Device is registered, but not ready to be used. */
+	RTE_DMA_DEV_REGISTERED,
+	/** Device is ready for use. This is set by the PMD. */
+	RTE_DMA_DEV_READY,
+};
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ */
+struct rte_dma_dev {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	void *dev_private; /**< PMD-specific private data. */
+	/** Device info supplied during device initialization. */
+	struct rte_device *device;
+	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dma_dev *rte_dma_devices;
+
+#endif /* RTE_DMADEV_CORE_H */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000000..02281c74fd
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef RTE_DMADEV_PMD_H
+#define RTE_DMADEV_PMD_H
+
+/**
+ * @file
+ *
+ * DMA Device PMD APIs
+ *
+ * Driver-facing APIs for a DMA device. These are not to be called directly by
+ * any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Allocates a new dmadev slot for a DMA device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ * @param numa_node
+ *   NUMA node on which to allocate the driver's private data.
+ * @param private_data_size
+ *   Driver's private data size.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dma_dev *rte_dma_pmd_allocate(const char *name, int numa_node,
+					 size_t private_data_size);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   - 0 on success, negative on error.
+ */
+__rte_internal
+int rte_dma_pmd_release(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_PMD_H */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000000..56ea0332cb
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dma_count_avail;
+	rte_dma_dev_max;
+	rte_dma_get_dev_id;
+	rte_dma_is_valid;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dma_devices;
+	rte_dma_pmd_allocate;
+	rte_dma_pmd_release;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 1673ca4323..3dd920f5c5 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -45,6 +45,7 @@ libraries = [
         'pdump',
         'rawdev',
         'regexdev',
+        'dmadev',
         'rib',
         'reorder',
         'sched',
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support
  2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library Chengwen Feng
@ 2021-09-24 10:53   ` Chengwen Feng
  2021-10-05 10:16     ` Matan Azrad
  2021-10-06 10:46     ` Thomas Monjalon
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 3/6] dmadev: add data " Chengwen Feng
                     ` (3 subsequent siblings)
  5 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-24 10:53 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch add control plane functions for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  41 +++
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 359 ++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 480 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  62 +++-
 lib/dmadev/version.map                 |   9 +
 6 files changed, 951 insertions(+), 1 deletion(-)

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index 822282213c..c2b0b0420b 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -62,3 +62,44 @@ identifiers:
 
 - A device name used to designate the DMA device in console messages, for
   administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dma_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dma_configure(int16_t dev_id,
+                         const struct rte_dma_conf *dev_conf);
+
+The ``rte_dma_conf`` structure is used to pass the configuration parameters
+for the DMA device, for example the number of virtual DMA channels to set up
+and an indication of whether to enable silent mode.
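+
+A minimal sketch of configuring a device with one virtual DMA channel
+(``dev_id`` is assumed to identify a valid dmadev; error handling is reduced
+to ``rte_exit`` for brevity):
+
+.. code-block:: c
+
+   struct rte_dma_conf dev_conf = {
+      .nb_vchans = 1,         /* set up a single virtual DMA channel */
+      .enable_silent = false, /* normal (non-silent) mode */
+   };
+
+   if (rte_dma_configure(dev_id, &dev_conf) != 0)
+      rte_exit(EXIT_FAILURE, "Error with rte_dma_configure()\n");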
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dma_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+                           const struct rte_dma_vchan_conf *conf);
+
+The ``rte_dma_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel, for example the transfer direction,
+the number of descriptors for the virtual DMA channel, and the source and
+destination device access port parameters.
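+
+A minimal sketch of setting up virtual DMA channel 0 for memory-to-memory
+copies; the descriptor count of 1024 is a placeholder and must lie within the
+``[min_desc, max_desc]`` range reported by ``rte_dma_info_get``:
+
+.. code-block:: c
+
+   struct rte_dma_vchan_conf vchan_conf = {
+      .direction = RTE_DMA_DIR_MEM_TO_MEM,
+      .nb_desc = 1024, /* ring size for this virtual channel */
+      /* src_port/dst_port stay zeroed: RTE_DMA_PORT_NONE for mem2mem. */
+   };
+
+   if (rte_dma_vchan_setup(dev_id, 0, &vchan_conf) != 0)
+      rte_exit(EXIT_FAILURE, "Error with rte_dma_vchan_setup()\n");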
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dma_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which, when enabled, does not
+require the application to invoke the dequeue APIs.
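+
+A minimal sketch of retrieving the device info and checking the copy
+capability before use:
+
+.. code-block:: c
+
+   struct rte_dma_info info;
+
+   if (rte_dma_info_get(dev_id, &info) != 0)
+      rte_exit(EXIT_FAILURE, "Error with rte_dma_info_get()\n");
+
+   if ((info.dev_capa & RTE_DMA_CAPA_OPS_COPY) == 0)
+      rte_exit(EXIT_FAILURE, "Device does not support copy ops\n");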
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 74639f1e81..0aceaa8837 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -109,6 +109,7 @@ New Features
 * **Introduced dmadev library with:**
 
   * Device allocation APIs.
+  * Control plane APIs.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 96af3f0772..e0134b9eec 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -218,6 +218,9 @@ rte_dma_pmd_release(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
+	if (dev->state == RTE_DMA_DEV_READY)
+		return rte_dma_close(dev->dev_id);
+
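+	/* Not yet READY: free the slot without invoking PMD callbacks. */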
 	dma_release(dev);
 	return 0;
 }
@@ -261,3 +264,359 @@ rte_dma_count_avail(void)
 
 	return count;
 }
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dma_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dma_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->numa_node = dev->device->numa_node;
+	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > dev_info.max_vchans) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dma_conf));
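+	/* Cache the configuration only if the driver accepted it. */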
+	if (ret == 0)
+		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+
+	return ret;
+}
+
+int
+rte_dma_start(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+		return 0;
+	}
+
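+	/* The dev_start callback is optional for drivers. */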
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	if (dev->dev_started == 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+
+	/* Device must be stopped before it can be closed */
+	if (dev->dev_started == 1) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_close)(dev);
+	if (ret == 0)
+		dma_release(dev);
+
+	return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
+		return -EINVAL;
+	}
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= dev_info.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < dev_info.min_desc ||
+	    conf->nb_desc > dev_info.max_desc) {
+		RTE_DMA_LOG(ERR,
+			"Device %d number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
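+	/* An access port must be specified exactly when the corresponding
+	 * end of the transfer is a device.
+	 */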
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dma_vchan_conf));
+}
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (stats == NULL)
+		return -EINVAL;
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dma_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMA_CAPA_SVA,         "sva"     },
+		{ RTE_DMA_CAPA_SILENT,      "silent"  },
+		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
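+		/* Isolate the lowest set capability bit and print its name. */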
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		(void)fprintf(f, " %s", dma_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	(void)fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
+	if (f == NULL)
+		return -EINVAL;
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
+		return -EINVAL;
+	}
+
+	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+		dev->dev_id,
+		dev->dev_name,
+		dev->dev_started ? "started" : "stopped");
+	dma_dump_capability(f, dev_info.dev_capa);
+	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
+	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
+	(void)fprintf(f, "  silent_mode: %s\n",
+		dev->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 17dc0d1226..5114c37446 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -53,6 +53,28 @@
  * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
  * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
  *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dma_configure()
+ *     - rte_dma_vchan_setup()
+ *     - rte_dma_start()
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
+ * rte_dma_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dma_start() again. The dataplane functions should not
+ * be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the rte_dma_close()
+ * function.
+ *
+ * Regarding MT-safety: all functions of the dmadev API exported by a PMD are
+ * lock-free and assume they are not invoked in parallel on different logical
+ * cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
+ *
  */
 
 #include <stdint.h>
@@ -125,6 +147,464 @@ bool rte_dma_is_valid(int16_t dev_id);
 __rte_experimental
 uint16_t rte_dma_count_avail(void);
 
+/** DMA device supports memory-to-memory transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
+/** DMA device supports memory-to-device transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
+/** DMA device supports device-to-memory transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
+/** DMA device supports device-to-device transfer.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
+/** DMA device supports SVA, which allows a VA to be used as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, such
+ * as memory from rte_malloc(), rte_memzone(), malloc, or stack memory.
+ * If the device doesn't support SVA, the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
+/** DMA device supports working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dma_completed*() APIs.
+ *
+ * @see struct rte_dma_conf::silent_mode
+ */
+#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
+/** DMA device supports copy ops.
+ * This capability starts at bit index 32, leaving a gap between the normal
+ * capability bits and the ops capability bits.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
+/** DMA device supports scatter-gather list copy ops.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
+/** DMA device supports fill ops.
+ *
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ *
+ * @see rte_dma_info_get
+ */
+struct rte_dma_info {
+	/** Device capabilities (RTE_DMA_CAPA_*). */
+	uint64_t dev_capa;
+	/** Maximum number of virtual DMA channels supported. */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/** Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dma_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t max_sges;
+	/** NUMA node connection, -1 if unknown. */
+	int16_t numa_node;
+	/** Number of virtual DMA channels configured. */
+	uint16_t nb_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dma_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ *
+ * @see rte_dma_configure
+ */
+struct rte_dma_conf {
+	/** The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dma_info obtained from rte_dma_info_get().
+	 */
+	uint16_t nb_vchans;
+	/** Indicates whether to enable silent mode.
+	 * false: default mode, true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMA_CAPA_SILENT
+	 */
+	bool enable_silent;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dma_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_start(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dma_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stop(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_close(int16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+enum rte_dma_direction {
+	/** DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/** DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from memory
+	 * (which is SoCs memory) to device (which is host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/** DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is host memory) to memory (which is SoCs memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/** DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoCs are installed on host servers as
+	 * iNICs through the PCIe interface. In this case, the SoCs works in
+	 * EP(endpoint) mode, it could initiate a DMA move request from device
+	 * (which is host memory) to the device (which is another host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+};
+
+/**
+ * DMA access port type defines.
+ *
+ * @see struct rte_dma_port_param::port_type
+ */
+enum rte_dma_port_type {
+	RTE_DMA_PORT_NONE,
+	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dma_vchan_conf::src_port
+ * @see struct rte_dma_vchan_conf::dst_port
+ */
+struct rte_dma_port_param {
+	/** The device access port type.
+	 *
+	 * @see enum rte_dma_port_type
+	 */
+	enum rte_dma_port_type port_type;
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			/** The PASID field in the TLP packet. */
+			uint64_t pasid : 20;
+			/** The attributes field in the TLP packet. */
+			uint64_t attr : 3;
+			/** The processing hint field in the TLP packet. */
+			uint64_t ph : 2;
+			/** The steering tag field in the TLP packet. */
+			uint64_t st : 16;
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ *
+ * @see rte_dma_vchan_setup
+ */
+struct rte_dma_vchan_conf {
+	/** Transfer direction
+	 *
+	 * @see enum rte_dma_direction
+	 */
+	enum rte_dma_direction direction;
+	/** Number of descriptor for the virtual DMA channel */
+	uint16_t nb_desc;
+	/** 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param src_port;
+	/** 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param dst_port;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dma_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+			const struct rte_dma_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ *
+ * @see rte_dma_stats_get
+ */
+struct rte_dma_stats {
+	/** Count of operations which were submitted to hardware. */
+	uint64_t submitted;
+	/** Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t completed;
+	/** Count of operations which failed to complete. */
+	uint64_t errors;
+};
+
+/**
+ * Special ID, which is used to represent all virtual DMA channels.
+ *
+ * @see rte_dma_stats_get
+ * @see rte_dma_stats_reset
+ */
+#define RTE_DMA_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMA_ALL_VCHAN, all channels are targeted.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dma_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
+		      struct rte_dma_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMA_ALL_VCHAN, all channels are targeted.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dump(int16_t dev_id, FILE *f);
+
 #include "rte_dmadev_core.h"
 
 #ifdef __cplusplus
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 5ed96853b2..d6f885527a 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -18,6 +18,43 @@
  *
  */
 
+struct rte_dma_dev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
+				  struct rte_dma_info *dev_info,
+				  uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev,
+				   const struct rte_dma_conf *dev_conf,
+				   uint32_t conf_sz);
+
+/** @internal Used to start a configured device. */
+typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				const struct rte_dma_vchan_conf *conf,
+				uint32_t conf_sz);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev,
+			uint16_t vchan, struct rte_dma_stats *stats,
+			uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
+
 /**
  * Possible states of a DMA device.
  *
@@ -32,7 +69,26 @@ enum rte_dma_dev_state {
 };
 
 /**
- * @internal
+ * DMA device operations function pointer table.
+ *
+ * @see struct rte_dma_dev:dev_ops
+ */
+struct rte_dma_dev_ops {
+	rte_dma_info_get_t       dev_info_get;
+	rte_dma_configure_t      dev_configure;
+	rte_dma_start_t          dev_start;
+	rte_dma_stop_t           dev_stop;
+	rte_dma_close_t          dev_close;
+
+	rte_dma_vchan_setup_t    vchan_setup;
+
+	rte_dma_stats_get_t      stats_get;
+	rte_dma_stats_reset_t    stats_reset;
+
+	rte_dma_dump_t           dev_dump;
+};
+
+/** @internal
  * The generic data structure associated with each DMA device.
  */
 struct rte_dma_dev {
@@ -40,9 +96,13 @@ struct rte_dma_dev {
 	int16_t dev_id; /**< Device [external] identifier. */
 	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
 	void *dev_private; /**< PMD-specific private data. */
+	/** Functions exported by PMD. */
+	const struct rte_dma_dev_ops *dev_ops;
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	/** Device info supplied during device initialization. */
 	struct rte_device *device;
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 56ea0332cb..6b7939b10f 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -1,10 +1,19 @@
 EXPERIMENTAL {
 	global:
 
+	rte_dma_close;
+	rte_dma_configure;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
+	rte_dma_dump;
 	rte_dma_get_dev_id;
+	rte_dma_info_get;
 	rte_dma_is_valid;
+	rte_dma_start;
+	rte_dma_stats_get;
+	rte_dma_stats_reset;
+	rte_dma_stop;
+	rte_dma_vchan_setup;
 
 	local: *;
 };
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v23 3/6] dmadev: add data plane function support
  2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library Chengwen Feng
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support Chengwen Feng
@ 2021-09-24 10:53   ` Chengwen Feng
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 4/6] dmadev: add multi-process support Chengwen Feng
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-24 10:53 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch add data plane functions for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  22 ++
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.h                | 460 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  51 ++-
 lib/dmadev/version.map                 |   6 +
 5 files changed, 537 insertions(+), 3 deletions(-)

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index c2b0b0420b..de8b599d96 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -103,3 +103,25 @@ can be used to get the device info and supported features.
 
 Silent mode is a special device capability which does not require the
 application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dma_copy`` and ``rte_dma_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dma_submit`` API is used to issue the doorbell to hardware.
+Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs, ``rte_dma_completed`` and
+``rte_dma_completed_status``, which are used to obtain the results of the
+enqueue requests. ``rte_dma_completed`` will return the number of successfully
+completed operations. ``rte_dma_completed_status`` will return the number of
+completed operations along with the status of each operation (filled into the
+``status`` array passed by the user). These two APIs can also return the last
+completed operation's ``ring_idx``, which could help users track operations
+within their own application-defined rings.
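+
+A minimal sketch of a single copy followed by completion polling (``dev_id``
+and virtual channel 0 are assumed to be configured and started; ``src_iova``,
+``dst_iova`` and ``length`` are placeholders, and the addresses must be IOVAs
+unless the device reports the SVA capability):
+
+.. code-block:: c
+
+   uint16_t n, last_idx;
+   bool has_error = false;
+
+   /* Enqueue the copy and ring the doorbell in the same call. */
+   if (rte_dma_copy(dev_id, 0, src_iova, dst_iova, length,
+                    RTE_DMA_OP_FLAG_SUBMIT) < 0)
+      rte_exit(EXIT_FAILURE, "Error with rte_dma_copy()\n");
+
+   /* Poll until the operation completes or an error is reported. */
+   do {
+      n = rte_dma_completed(dev_id, 0, 1, &last_idx, &has_error);
+   } while (n == 0 && !has_error);
+
+   if (has_error)
+      rte_exit(EXIT_FAILURE, "DMA copy failed\n");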
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 0aceaa8837..21b3c48257 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -110,6 +110,7 @@ New Features
 
   * Device allocation APIs.
   * Control plane APIs.
+  * Data plane APIs.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 5114c37446..84e30f7e61 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -59,6 +59,8 @@
  *     - rte_dma_vchan_setup()
  *     - rte_dma_start()
  *
+ * Then, the application can invoke dataplane functions to process jobs.
+ *
  * If the application wants to change the configuration (i.e. invoke
  * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
  * rte_dma_stop() first to stop the device and then do the reconfiguration
@@ -68,6 +70,77 @@
  * Finally, an application can close a dmadev by invoking the rte_dma_close()
  * function.
  *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dma_copy()
+ *     - rte_dma_copy_sg()
+ *     - rte_dma_fill()
+ *     - rte_dma_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to the virtual
+ * DMA channel. If the submission is successful, a non-negative
+ * ring_idx <= UINT16_MAX is returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue the doorbell to hardware; alternatively, the
+ * flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can
+ * do the same work.
+ * @note When enqueuing a set of jobs to the device, having a separate submit
+ * outside a loop makes for clearer code than having a check for the last
+ * iteration inside the loop to set a special submit flag.  However, for cases
+ * where one item alone is to be submitted or there is a small set of jobs to
+ * be submitted sequentially, having a submit flag provides a lower-overhead
+ * way of doing the submission while still keeping the code clean.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dma_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dma_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT),
+ * the application does not invoke the above two completed APIs.
+ *
+ * About the ring_idx which enqueue APIs (e.g. rte_dma_copy(), rte_dma_fill())
+ * return, the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented,
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero, after the
+ *       device is stopped, the ring_idx needs to be reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx return is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx return is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dma_copy(),
+ * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type.
+ *
+ * The dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory address
+ * can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
  * Regarding MT-safety: all functions of the dmadev API exported by a PMD are
  * lock-free and assume they are not invoked in parallel on different logical
  * cores to work on the same target dmadev object.
@@ -605,8 +678,395 @@ int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 int rte_dma_dump(int16_t dev_id, FILE *f);
 
+/**
+ * DMA transfer result status code defines.
+ *
+ * @see rte_dma_completed_status
+ */
+enum rte_dma_status_code {
+	/** The operation completed successfully. */
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/** The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job), which allows outstanding requests to complete as much as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_USER_ABORT,
+	/** The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it's possible for jobs from later batches to be
+	 * completed, though, so report the status of the not attempted jobs
+	 * before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/** The operation failed to complete due to an invalid source address. */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/** The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/** The operation failed to complete due to an invalid source or
+	 * destination address; covers the case where only an address error is
+	 * known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/** The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/** The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/** The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/** The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/** The operation failed to complete due to a bus error; covers the
+	 * case where only a bus error is known, but not in which direction.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/** The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/** The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/** The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device/device-to-memory/
+	 * device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/** The operation failed to complete due to a lookup page fault. */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/** The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+};
+
+/**
+ * A structure used to hold scatter-gather DMA operation request entry.
+ *
+ * @see rte_dma_copy_sg
+ */
+struct rte_dma_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
 #include "rte_dmadev_core.h"
 
+/** DMA fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in-order (i.e. it has a default fence between
+ * operations), this flag could be a no-op.
+ *
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+#define RTE_DMA_OP_FLAG_FENCE	RTE_BIT64(0)
+/** DMA submit flag.
+ * It means the operation with this flag must issue the doorbell to hardware
+ * after the jobs are enqueued.
+ *
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+#define RTE_DMA_OP_FLAG_SUBMIT	RTE_BIT64(1)
+/** Hint for the DMA to write data to the low level cache.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so the driver should not return an error if this
+ * flag is set.
+ *
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+#define RTE_DMA_OP_FLAG_LLC	RTE_BIT64(2)
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is rung to begin
+ * this operation, otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
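+	/* Parameter checks are compiled in only when RTE_DMADEV_DEBUG is
+	 * defined, keeping the fast path lean in normal builds.
+	 */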
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
+	    vchan >= dev->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
+#endif
+
+	return (*dev->copy)(dev, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is rung to begin this operation, otherwise the doorbell is not
+ * rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   A pointer to the source scatter-gather entry array.
+ * @param dst
+ *   A pointer to the destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
+		struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		uint64_t flags)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
+	    vchan >= dev->dev_conf.nb_vchans ||
+	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
+#endif
+
+	return (*dev->copy_sg)(dev, vchan, src, dst, nb_src, nb_dst, flags);
+}
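+
+/*
+ * Illustrative only: gathering two source segments into one destination
+ * segment, assuming the 'addr'/'length' layout of struct rte_dma_sge; all
+ * identifiers below are placeholders:
+ *
+ *	struct rte_dma_sge src[2] = {
+ *		{ .addr = src_iova1, .length = len1 },
+ *		{ .addr = src_iova2, .length = len2 },
+ *	};
+ *	struct rte_dma_sge dst[1] = {
+ *		{ .addr = dst_iova, .length = len1 + len2 },
+ *	};
+ *	ret = rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 1,
+ *			      RTE_DMA_OP_FLAG_SUBMIT);
+ */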
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is rung to begin
+ * this operation; otherwise the doorbell is not rung.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
+	     rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
+	    vchan >= dev->dev_conf.nb_vchans || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
+#endif
+
+	return (*dev->fill)(dev, vchan, pattern, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API writes the "doorbell" to the hardware, triggering it to begin the
+ * operations previously enqueued by rte_dma_copy(), rte_dma_copy_sg() and
+ * rte_dma_fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+static inline int
+rte_dma_submit(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
+	    vchan >= dev->dev_conf.nb_vchans)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
+#endif
+
+	return (*dev->submit)(dev, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates whether any transfer errors occurred.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
+	    vchan >= dev->dev_conf.nb_vchans || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these checks should be compile-time evaluated, since
+	 * this is an inline function.
+	 * - If NULL is explicitly passed as a parameter, the compiler knows
+	 *   the value is NULL.
+	 * - If the address of a local variable is passed as a parameter, the
+	 *   compiler knows it is non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*dev->completed)(dev, vchan, nb_cpls, last_idx, has_error);
+}
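+
+/*
+ * Illustrative submit/poll sequence (an example only; 'burst' is an assumed
+ * application-chosen batch size and error handling is elided):
+ *
+ *	uint16_t done, last_idx;
+ *	bool has_error;
+ *
+ *	rte_dma_submit(dev_id, vchan);
+ *	do {
+ *		done = rte_dma_completed(dev_id, vchan, burst,
+ *					 &last_idx, &has_error);
+ *	} while (done == 0 && !has_error);
+ */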
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Returns the number of operations that have been completed; each completed
+ * operation may have either succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is n (with n greater than zero), then the first n entries
+ *   of the status array are also set.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
+			 const uint16_t nb_cpls, uint16_t *last_idx,
+			 enum rte_dma_status_code *status)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
+	    vchan >= dev->dev_conf.nb_vchans ||
+	    nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*dev->completed_status)(dev, vchan, nb_cpls, last_idx, status);
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index d6f885527a..5c202e35ce 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -55,6 +55,36 @@ typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
 /** @internal Used to dump internal information. */
 typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
 
+/** @internal Used to enqueue a copy operation. */
+typedef int (*rte_dma_copy_t)(struct rte_dma_dev *dev, uint16_t vchan,
+			      rte_iova_t src, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to enqueue a scatter-gather list copy operation. */
+typedef int (*rte_dma_copy_sg_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				 const struct rte_dma_sge *src,
+				 const struct rte_dma_sge *dst,
+				 uint16_t nb_src, uint16_t nb_dst,
+				 uint64_t flags);
+
+/** @internal Used to enqueue a fill operation. */
+typedef int (*rte_dma_fill_t)(struct rte_dma_dev *dev, uint16_t vchan,
+			      uint64_t pattern, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to trigger hardware to begin working. */
+typedef int (*rte_dma_submit_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to return the number of successfully completed operations. */
+typedef uint16_t (*rte_dma_completed_t)(struct rte_dma_dev *dev,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+
+/** @internal Used to return the number of completed operations. */
+typedef uint16_t (*rte_dma_completed_status_t)(struct rte_dma_dev *dev,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+
 /**
  * Possible states of a DMA device.
  *
@@ -90,14 +120,29 @@ struct rte_dma_dev_ops {
 
 /** @internal
  * The generic data structure associated with each DMA device.
+ *
+ * The dataplane APIs are located at the beginning of the structure.
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
  */
 struct rte_dma_dev {
-	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
-	int16_t dev_id; /**< Device [external] identifier. */
-	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
 	void *dev_private; /**< PMD-specific private data. */
+	rte_dma_copy_t             copy;
+	rte_dma_copy_sg_t          copy_sg;
+	rte_dma_fill_t             fill;
+	rte_dma_submit_t           submit;
+	rte_dma_completed_t        completed;
+	rte_dma_completed_status_t completed_status;
+	void *reserved_cl0;
+	/** Reserve space for future IO functions, while keeping dev_ops
+	 * pointer on the second cacheline.
+	 */
+	void *reserved_cl1[7];
 	/** Functions exported by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
 	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	/** Device info supplied during device initialization. */
 	struct rte_device *device;
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index 6b7939b10f..c780463bb2 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -2,10 +2,15 @@ EXPERIMENTAL {
 	global:
 
 	rte_dma_close;
+	rte_dma_completed;
+	rte_dma_completed_status;
 	rte_dma_configure;
+	rte_dma_copy;
+	rte_dma_copy_sg;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
 	rte_dma_dump;
+	rte_dma_fill;
 	rte_dma_get_dev_id;
 	rte_dma_info_get;
 	rte_dma_is_valid;
@@ -13,6 +18,7 @@ EXPERIMENTAL {
 	rte_dma_stats_get;
 	rte_dma_stats_reset;
 	rte_dma_stop;
+	rte_dma_submit;
 	rte_dma_vchan_setup;
 
 	local: *;
-- 
2.33.0



* [dpdk-dev] [PATCH v23 4/6] dmadev: add multi-process support
  2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 3/6] dmadev: add data " Chengwen Feng
@ 2021-09-24 10:53   ` Chengwen Feng
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 6/6] app/test: add dmadev API test Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-24 10:53 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds multi-process support for dmadev.

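The approach follows the standard DPDK primary/secondary process
pattern: device data that must be visible to all processes lives in a
named memzone, which the primary process reserves and secondary
processes look up. A minimal sketch of that pattern (illustrative only;
the complete logic is in dma_shared_data_prepare() below):

	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve("rte_dma_dev_data", size,
					 rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup("rte_dma_dev_data");
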
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 168 ++++++++++++++++++++-----
 lib/dmadev/rte_dmadev.h                |  24 ++--
 lib/dmadev/rte_dmadev_core.h           |  45 +++++--
 4 files changed, 185 insertions(+), 53 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 21b3c48257..67d2bf5101 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -111,6 +111,7 @@ New Features
   * Device allocation APIs.
   * Control plane APIs.
   * Data plane APIs.
+  * Multi-process support.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index e0134b9eec..1338b29937 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -17,6 +17,13 @@
 
 struct rte_dma_dev *rte_dma_devices;
 static int16_t dma_devices_max;
+static struct {
+	/* Holds the dev_max value of the primary process. This field is set
+	 * by the primary process and read by secondary processes.
+	 */
+	int16_t dev_max;
+	struct rte_dma_dev_data data[0];
+} *dma_devices_shared_data;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
 #define RTE_DMA_LOG(level, fmt, args...) \
@@ -76,11 +83,11 @@ dma_find_free_dev(void)
 {
 	int16_t i;
 
-	if (rte_dma_devices == NULL)
+	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
 		return -1;
 
 	for (i = 0; i < dma_devices_max; i++) {
-		if (rte_dma_devices[i].dev_name[0] == '\0')
+		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
 			return i;
 	}
 
@@ -97,7 +104,7 @@ dma_find(const char *name)
 
 	for (i = 0; i < dma_devices_max; i++) {
 		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
-		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
 			return &rte_dma_devices[i];
 	}
 
@@ -130,16 +137,65 @@ dma_process_data_prepare(void)
 	return 0;
 }
 
+static int
+dma_shared_data_prepare(void)
+{
+	const char *mz_name = "rte_dma_dev_data";
+	const struct rte_memzone *mz;
+	size_t size;
+
+	if (dma_devices_shared_data != NULL)
+		return 0;
+
+	size = sizeof(*dma_devices_shared_data) +
+		sizeof(struct rte_dma_dev_data) * dma_devices_max;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
+	else
+		mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL)
+		return -ENOMEM;
+
+	dma_devices_shared_data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dma_devices_shared_data, 0, size);
+		dma_devices_shared_data->dev_max = dma_devices_max;
+	} else {
+		dma_devices_max = dma_devices_shared_data->dev_max;
+	}
+
+	return 0;
+}
+
 static int
 dma_data_prepare(void)
 {
-	if (dma_devices_max == 0)
-		dma_devices_max = RTE_DMADEV_DEFAULT_MAX_DEVS;
-	return dma_process_data_prepare();
+	int ret;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (dma_devices_max == 0)
+			dma_devices_max = RTE_DMADEV_DEFAULT_MAX_DEVS;
+		ret = dma_process_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+	} else {
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_process_data_prepare();
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static struct rte_dma_dev *
-dma_allocate(const char *name, int numa_node, size_t private_data_size)
+dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
 {
 	struct rte_dma_dev *dev;
 	void *dev_private;
@@ -174,10 +230,55 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 
 	dev = &rte_dma_devices[dev_id];
 	dev->dev_private = dev_private;
-	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
-	dev->dev_id = dev_id;
-	dev->numa_node = numa_node;
-	dev->dev_private = dev_private;
+	dev->data = &dma_devices_shared_data->data[dev_id];
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+	dev->data->dev_id = dev_id;
+	dev->data->numa_node = numa_node;
+	dev->data->dev_private = dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_attach_secondary(const char *name)
+{
+	struct rte_dma_dev *dev;
+	int16_t i;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == dma_devices_max) {
+		RTE_DMA_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[i];
+	dev->data = &dma_devices_shared_data->data[i];
+	dev->dev_private = dev->data->dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dma_allocate_primary(name, numa_node, private_data_size);
+	else
+		dev = dma_attach_secondary(name);
 
 	return dev;
 }
@@ -185,7 +286,11 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 static void
 dma_release(struct rte_dma_dev *dev)
 {
-	rte_free(dev->dev_private);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
+		rte_free(dev->dev_private);
+	}
+
 	memset(dev, 0, sizeof(struct rte_dma_dev));
 }
 
@@ -219,7 +324,7 @@ rte_dma_pmd_release(const char *name)
 		return -EINVAL;
 
 	if (dev->state == RTE_DMA_DEV_READY)
-		return rte_dma_close(dev->dev_id);
+		return rte_dma_close(dev->data->dev_id);
 
 	dma_release(dev);
 	return 0;
@@ -237,7 +342,7 @@ rte_dma_get_dev_id(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
-	return dev->dev_id;
+	return dev->data->dev_id;
 }
 
 bool
@@ -283,7 +388,7 @@ rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
 		return ret;
 
 	dev_info->numa_node = dev->device->numa_node;
-	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
 
 	return 0;
 }
@@ -299,7 +404,7 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	if (dev_conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -331,7 +436,8 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
 					     sizeof(struct rte_dma_conf));
 	if (ret == 0)
-		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+		memcpy(&dev->data->dev_conf, dev_conf,
+		       sizeof(struct rte_dma_conf));
 
 	return ret;
 }
@@ -344,12 +450,12 @@ rte_dma_start(int16_t dev_id)
 
 	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
 
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
 		return 0;
 	}
@@ -362,7 +468,7 @@ rte_dma_start(int16_t dev_id)
 		return ret;
 
 mark_started:
-	dev->dev_started = 1;
+	dev->data->dev_started = 1;
 	return 0;
 }
 
@@ -374,7 +480,7 @@ rte_dma_stop(int16_t dev_id)
 
 	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
 
-	if (dev->dev_started == 0) {
+	if (dev->data->dev_started == 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
 		return 0;
 	}
@@ -387,7 +493,7 @@ rte_dma_stop(int16_t dev_id)
 		return ret;
 
 mark_stopped:
-	dev->dev_started = 0;
+	dev->data->dev_started = 0;
 	return 0;
 }
 
@@ -400,7 +506,7 @@ rte_dma_close(int16_t dev_id)
 	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
 
 	/* Device must be stopped before it can be closed */
-	if (dev->dev_started == 1) {
+	if (dev->data->dev_started == 1) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped before closing", dev_id);
 		return -EBUSY;
@@ -427,7 +533,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 	if (conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -439,7 +545,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
 		return -EINVAL;
 	}
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
@@ -513,7 +619,7 @@ rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
 	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
 	if (stats == NULL)
 		return -EINVAL;
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -532,7 +638,7 @@ rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
 
 	RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -606,14 +712,14 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 	}
 
 	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
-		dev->dev_id,
-		dev->dev_name,
-		dev->dev_started ? "started" : "stopped");
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
 	dma_dump_capability(f, dev_info.dev_capa);
 	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
 	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
 	(void)fprintf(f, "  silent_mode: %s\n",
-		dev->dev_conf.enable_silent ? "on" : "off");
+		dev->data->dev_conf.enable_silent ? "on" : "off");
 
 	if (dev->dev_ops->dev_dump != NULL)
 		return (*dev->dev_ops->dev_dump)(dev, f);
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 84e30f7e61..561a1b1154 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -821,8 +821,8 @@ rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
 
 #ifdef RTE_DMADEV_DEBUG
-	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
-	    vchan >= dev->dev_conf.nb_vchans || length == 0)
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
 		return -EINVAL;
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy, -ENOTSUP);
 #endif
@@ -872,8 +872,8 @@ rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
 
 #ifdef RTE_DMADEV_DEBUG
-	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
-	    vchan >= dev->dev_conf.nb_vchans ||
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
 	    src == NULL || dst == NULL || nb_src == 0 || nb_dst == 0)
 		return -EINVAL;
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->copy_sg, -ENOTSUP);
@@ -919,8 +919,8 @@ rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
 
 #ifdef RTE_DMADEV_DEBUG
-	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
-	    vchan >= dev->dev_conf.nb_vchans || length == 0)
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || length == 0)
 		return -EINVAL;
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->fill, -ENOTSUP);
 #endif
@@ -952,8 +952,8 @@ rte_dma_submit(int16_t dev_id, uint16_t vchan)
 	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
 
 #ifdef RTE_DMADEV_DEBUG
-	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
-	    vchan >= dev->dev_conf.nb_vchans)
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans)
 		return -EINVAL;
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->submit, -ENOTSUP);
 #endif
@@ -994,8 +994,8 @@ rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
 	bool err;
 
 #ifdef RTE_DMADEV_DEBUG
-	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
-	    vchan >= dev->dev_conf.nb_vchans || nb_cpls == 0)
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans || nb_cpls == 0)
 		return 0;
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed, 0);
 #endif
@@ -1054,8 +1054,8 @@ rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
 	uint16_t idx;
 
 #ifdef RTE_DMADEV_DEBUG
-	if (!rte_dma_is_valid(dev_id) || !dev->dev_started ||
-	    vchan >= dev->dev_conf.nb_vchans ||
+	if (!rte_dma_is_valid(dev_id) || !dev->data->dev_started ||
+	    vchan >= dev->data->dev_conf.nb_vchans ||
 	    nb_cpls == 0 || status == NULL)
 		return 0;
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->completed_status, 0);
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 5c202e35ce..019ac7af9c 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -118,10 +118,39 @@ struct rte_dma_dev_ops {
 	rte_dma_dump_t           dev_dump;
 };
 
-/** @internal
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ *
+ * @see struct rte_dma_dev::data
+ */
+struct rte_dma_dev_data {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	/** PMD-specific private data.
+	 * This is a copy of the 'dev_private' field in 'struct rte_dma_dev'
+	 * from the primary process; it is used by secondary processes to
+	 * obtain the dev_private pointer.
+	 */
+	void *dev_private;
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
+/**
+ * @internal
  * The generic data structure associated with each DMA device.
  *
- * The dataplane APIs are located at the beginning of the structure.
+ * The dataplane APIs are located at the beginning of the structure, along
+ * with the pointer to where all the data elements for the particular device
+ * are stored in shared memory. This split scheme allows the function pointers
+ * and driver data to be per-process, while the actual configuration data for
+ * the device is shared.
  * The 'dev_private' field is placed in the first cache line to optimize
  * performance, because the PMD mainly depends on this field.
  */
@@ -134,20 +163,16 @@ struct rte_dma_dev {
 	rte_dma_completed_t        completed;
 	rte_dma_completed_status_t completed_status;
 	void *reserved_cl0;
-	/** Reserve space for future IO functions, while keeping dev_ops
-	 * pointer on the second cacheline.
+	/** Reserve space for future IO functions, while keeping data and
+	 * dev_ops pointers on the second cacheline.
 	 */
-	void *reserved_cl1[7];
+	void *reserved_cl1[6];
+	struct rte_dma_dev_data *data; /**< Pointer to device data. */
 	/** Functions exported by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
-	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
-	int16_t dev_id; /**< Device [external] identifier. */
-	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
-	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	/** Device info supplied during device initialization. */
 	struct rte_device *device;
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
-	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
-- 
2.33.0



* [dpdk-dev] [PATCH v23 5/6] dma/skeleton: introduce skeleton dmadev driver
  2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 4/6] dmadev: add multi-process support Chengwen Feng
@ 2021-09-24 10:53   ` Chengwen Feng
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 6/6] app/test: add dmadev API test Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-24 10:53 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The skeleton dmadev driver, along the lines of the rawdev skeleton, is
intended to showcase the dmadev library.

The design involves a virtual device which is plugged into the VDEV bus
on initialization.

Also, enable compilation of the dmadev skeleton driver.
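
The device can then be instantiated via EAL vdev arguments, for example
(illustrative; any DPDK application accepting EAL options will do):

	dpdk-testpmd --vdev=dma_skeleton,lcore=3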

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   1 +
 drivers/dma/meson.build                |   4 +-
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 570 +++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  61 +++
 drivers/dma/skeleton/version.map       |   3 +
 6 files changed, 645 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index a5b11ac70b..85d4f83395 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -457,6 +457,7 @@ F: doc/guides/regexdevs/features/default.ini
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: drivers/dma/skeleton/
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
index a24c56d8ff..d9c7ede32f 100644
--- a/drivers/dma/meson.build
+++ b/drivers/dma/meson.build
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright 2021 HiSilicon Limited
 
-drivers = []
+drivers = [
+        'skeleton',
+]
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000000..8871b80956
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000000..a7d55b8ca0
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,570 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances, currently only 1 is supported. */
+static uint16_t skeldma_count;
+
+static int
+skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	32
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+			     RTE_DMA_CAPA_SVA |
+			     RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
+		  uint32_t conf_sz)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	RTE_SET_USED(conf_sz);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dma_dev *dev = param;
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
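+			/* On uint32 wrap-around, saturate the counter at the
+			 * threshold so an idle ring keeps taking the sleep
+			 * below instead of restarting the busy-poll window.
+			 */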
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, include:
+	 * 1) fflush pending/running/completed ring to empty ring.
+	 * 2) init ring idx to zero.
+	 * 3) init running statistics.
+	 * 4) mark cpucopy task exit_flag to false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Set thread affinity lcore = %d fail!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skelteon_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a full ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dma_dev *dev)
+{
+	/* The device already stopped */
+	vchan_release(dev->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf,
+		    uint32_t conf_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(conf_sz);
+
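+	/* vchan_setup() creates the descriptor rings with rte_ring_create(),
+	 * which requires a power-of-2 count, hence this constraint on
+	 * nb_desc.
+	 */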
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of desc must be power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
+		  struct rte_dma_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->dev_private;
+
+	(void)fprintf(f,
+		"    lcore_id: %d\n"
+		"    socket_id: %d\n"
+		"    desc_empty_ring_count: %u\n"
+		"    desc_pending_ring_count: %u\n"
+		"    desc_running_ring_count: %u\n"
+		"    desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	(void)fprintf(f,
+		"    next_ring_idx: %u\n"
+		"    submitted_count: %" PRIu64 "\n"
+		"    completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
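+	/* Doorbell semantics: drain every previously enqueued (pending)
+	 * descriptor into the running ring consumed by the cpucopy thread,
+	 * then append the descriptor being submitted, if any.
+	 */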
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(struct rte_dma_dev *dev, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(struct rte_dma_dev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(struct rte_dma_dev *dev,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(struct rte_dma_dev *dev,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev->dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dma_dev_ops skeldma_ops = {
+	.dev_info_get  = skeldma_info_get,
+	.dev_configure = skeldma_configure,
+	.dev_start     = skeldma_start,
+	.dev_stop      = skeldma_stop,
+	.dev_close     = skeldma_close,
+
+	.vchan_setup   = skeldma_vchan_setup,
+
+	.stats_get     = skeldma_stats_get,
+	.stats_reset   = skeldma_stats_reset,
+
+	.dev_dump      = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dma_dev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	dev->copy = skeldma_copy;
+	dev->submit = skeldma_submit;
+	dev->completed = skeldma_completed;
+	dev->completed_status = skeldma_completed_status;
+	dev->dev_ops = &skeldma_ops;
+	dev->device = &vdev->device;
+
+	hw = dev->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	dev->state = RTE_DMA_DEV_READY;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	return rte_dma_pmd_release(name);
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multiple process not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_count > 0) {
+		SKELDMA_LOG(ERR, "Multiple instance not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		skeldma_count = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_count = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000000..eaa52364bf
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef SKELETON_DMADEV_H
+#define SKELETON_DMADEV_H
+
+#include <pthread.h>
+
+#include <rte_ring.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* SKELETON_DMADEV_H */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000000..c2e0723b4c
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
-- 
2.33.0



* [dpdk-dev] [PATCH v23 6/6] app/test: add dmadev API test
  2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-09-24 10:53   ` Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-09-24 10:53 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the
test framework.

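For example (the build path is illustrative and depends on the local
build setup):

	$ ./build/app/test/dpdk-test
	RTE>> dmadev_autotest
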
Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                |   1 +
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  41 +++
 app/test/test_dmadev_api.c | 574 +++++++++++++++++++++++++++++++++++++
 app/test/test_dmadev_api.h |   5 +
 5 files changed, 625 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 app/test/test_dmadev_api.h

diff --git a/MAINTAINERS b/MAINTAINERS
index 85d4f83395..3258da194d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -458,6 +458,7 @@ DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
 F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/app/test/meson.build b/app/test/meson.build
index a7611686ad..9027eba3a4 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -43,6 +43,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -162,6 +164,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -333,6 +336,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000000..75cc939158
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+#include "test_dmadev_api.h"
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dma_get_dev_id(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dma_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dma(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000000..90c317aae2
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dma_api(uint16_t dev_id);
+
+#define DMA_TEST_API_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static int16_t test_dev_id;
+static int16_t invalid_dev_id;
+
+static char *src;
+static char *dst;
+
+static int total;
+static int passed;
+static int failed;
+
+static int
+testsuite_setup(int16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = -1;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL) {
+		rte_free(src);
+		src = NULL;
+		return -ENOMEM;
+	}
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	/* Set dmadev log level to critical to suppress unnecessary output
+	 * during API tests.
+	 */
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	src = NULL;
+	rte_free(dst);
+	dst = NULL;
+	/* Ensure the dmadev is stopped. */
+	rte_dma_stop(test_dev_id);
+
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dma_get_dev_id(void)
+{
+	int ret = rte_dma_get_dev_id("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dma_is_valid(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dma_is_valid(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_count(void)
+{
+	uint16_t count = rte_dma_count_avail();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_info_get(void)
+{
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_configure(void)
+{
+	struct rte_dma_conf conf = { 0 };
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configure nb_vchans not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+check_direction(void)
+{
+	struct rte_dma_vchan_conf vchan_conf;
+	int ret;
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return 0;
+}
+
+static int
+check_port_type(struct rte_dma_info *dev_info)
+{
+	struct rte_dma_vchan_conf vchan_conf;
+	int ret;
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info->min_desc;
+	vchan_conf.src_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info->min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return 0;
+}
+
+static int
+test_dma_vchan_setup(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure the device is configured */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_vchan_setup(test_dev_id, dev_conf.nb_vchans, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	ret = check_direction();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check direction");
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check port type */
+	ret = check_port_type(&dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check port type");
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_start_stop(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup when device started */
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret);
+
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_stats(void)
+{
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dma_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, RTE_DMA_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, RTE_DMA_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+	ret = rte_dma_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static void
+setup_memory(void)
+{
+	int i;
+
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+}
+
+static int
+verify_memory(void)
+{
+	int i;
+
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] == dst[i])
+			continue;
+		RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+			"Failed to copy memory, %d %d", src[i], dst[i]);
+	}
+
+	return 0;
+}
+
+static int
+test_dma_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	setup_memory();
+
+	/* Check enqueue without submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+	/* Check add submit */
+	ret = rte_dma_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	ret = verify_memory();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	setup_memory();
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	ret = verify_memory();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	/* Stop dmadev to return it to a known state */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Check completed status again */
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed status");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Stop dmadev to return it to a known state */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dma_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup fail!\n");
+		return -1;
+	}
+
+	/* Each testcase that exits successfully must ensure that the test
+	 * dmadev still exists and is left in the stopped state.
+	 */
+	DMA_TEST_API_RUN(test_dma_get_dev_id);
+	DMA_TEST_API_RUN(test_dma_is_valid_dev);
+	DMA_TEST_API_RUN(test_dma_count);
+	DMA_TEST_API_RUN(test_dma_info_get);
+	DMA_TEST_API_RUN(test_dma_configure);
+	DMA_TEST_API_RUN(test_dma_vchan_setup);
+	DMA_TEST_API_RUN(test_dma_start_stop);
+	DMA_TEST_API_RUN(test_dma_stats);
+	DMA_TEST_API_RUN(test_dma_dump);
+	DMA_TEST_API_RUN(test_dma_completed);
+	DMA_TEST_API_RUN(test_dma_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
diff --git a/app/test/test_dmadev_api.h b/app/test/test_dmadev_api.h
new file mode 100644
index 0000000000..33fbc5bd41
--- /dev/null
+++ b/app/test/test_dmadev_api.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+int test_dma_api(uint16_t dev_id);
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library Chengwen Feng
@ 2021-10-04 21:12     ` Radha Mohan
  2021-10-05  8:24       ` Kevin Laatz
  2021-10-08  1:52       ` fengchengwen
  2021-10-06 10:26     ` Thomas Monjalon
  1 sibling, 2 replies; 339+ messages in thread
From: Radha Mohan @ 2021-10-04 21:12 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: Thomas Monjalon, ferruh.yigit, bruce.richardson,
	Jerin Jacob Kollanukkaran, Jerin Jacob, andrew.rybchenko,
	dpdk-dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, David Marchand, Satananda Burla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz, Radha Chintakuntla

On Fri, Sep 24, 2021 at 3:58 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>
> The 'dmadevice' is a generic type of DMA device.
>
> This patch introduce the 'dmadevice' device allocation APIs.
>
> The infrastructure is prepared to welcome drivers in drivers/dma/
>
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> ---
>  MAINTAINERS                            |   5 +
>  config/rte_config.h                    |   3 +
>  doc/api/doxy-api-index.md              |   1 +
>  doc/api/doxy-api.conf.in               |   1 +
>  doc/guides/dmadevs/index.rst           |  12 ++
>  doc/guides/index.rst                   |   1 +
>  doc/guides/prog_guide/dmadev.rst       |  64 ++++++
>  doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++++++++
>  doc/guides/prog_guide/index.rst        |   1 +
>  doc/guides/rel_notes/release_21_11.rst |   4 +
>  drivers/dma/meson.build                |   4 +
>  drivers/meson.build                    |   1 +
>  lib/dmadev/meson.build                 |   7 +
>  lib/dmadev/rte_dmadev.c                | 263 +++++++++++++++++++++++
>  lib/dmadev/rte_dmadev.h                | 134 ++++++++++++
>  lib/dmadev/rte_dmadev_core.h           |  51 +++++
>  lib/dmadev/rte_dmadev_pmd.h            |  60 ++++++
>  lib/dmadev/version.map                 |  20 ++
>  lib/meson.build                        |   1 +
>  19 files changed, 916 insertions(+)
>  create mode 100644 doc/guides/dmadevs/index.rst
>  create mode 100644 doc/guides/prog_guide/dmadev.rst
>  create mode 100644 doc/guides/prog_guide/img/dmadev.svg
>  create mode 100644 drivers/dma/meson.build
>  create mode 100644 lib/dmadev/meson.build
>  create mode 100644 lib/dmadev/rte_dmadev.c
>  create mode 100644 lib/dmadev/rte_dmadev.h
>  create mode 100644 lib/dmadev/rte_dmadev_core.h
>  create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>  create mode 100644 lib/dmadev/version.map
>
<snip>
Hi Chengwen,
I see that the new version removed the "rte_dmadev_get_device_by_name()".
What is the way to get the dmadev from inside the PMD .remove ? I am
looking to get the dev_private as we need to do some cleanup
operations from the remove function.

regards,
Radha Mohan

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library
  2021-10-04 21:12     ` Radha Mohan
@ 2021-10-05  8:24       ` Kevin Laatz
  2021-10-05 16:39         ` Radha Mohan
  2021-10-08  1:52       ` fengchengwen
  1 sibling, 1 reply; 339+ messages in thread
From: Kevin Laatz @ 2021-10-05  8:24 UTC (permalink / raw)
  To: Radha Mohan, Chengwen Feng
  Cc: Thomas Monjalon, ferruh.yigit, bruce.richardson,
	Jerin Jacob Kollanukkaran, Jerin Jacob, andrew.rybchenko,
	dpdk-dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, David Marchand, Satananda Burla, pkapoor,
	konstantin.ananyev, conor.walsh, Radha Chintakuntla

On 04/10/2021 22:12, Radha Mohan wrote:
> On Fri, Sep 24, 2021 at 3:58 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>> The 'dmadevice' is a generic type of DMA device.
>>
>> This patch introduce the 'dmadevice' device allocation APIs.
>>
>> The infrastructure is prepared to welcome drivers in drivers/dma/
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
>> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
>> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
>> ---
>>   MAINTAINERS                            |   5 +
>>   config/rte_config.h                    |   3 +
>>   doc/api/doxy-api-index.md              |   1 +
>>   doc/api/doxy-api.conf.in               |   1 +
>>   doc/guides/dmadevs/index.rst           |  12 ++
>>   doc/guides/index.rst                   |   1 +
>>   doc/guides/prog_guide/dmadev.rst       |  64 ++++++
>>   doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++++++++
>>   doc/guides/prog_guide/index.rst        |   1 +
>>   doc/guides/rel_notes/release_21_11.rst |   4 +
>>   drivers/dma/meson.build                |   4 +
>>   drivers/meson.build                    |   1 +
>>   lib/dmadev/meson.build                 |   7 +
>>   lib/dmadev/rte_dmadev.c                | 263 +++++++++++++++++++++++
>>   lib/dmadev/rte_dmadev.h                | 134 ++++++++++++
>>   lib/dmadev/rte_dmadev_core.h           |  51 +++++
>>   lib/dmadev/rte_dmadev_pmd.h            |  60 ++++++
>>   lib/dmadev/version.map                 |  20 ++
>>   lib/meson.build                        |   1 +
>>   19 files changed, 916 insertions(+)
>>   create mode 100644 doc/guides/dmadevs/index.rst
>>   create mode 100644 doc/guides/prog_guide/dmadev.rst
>>   create mode 100644 doc/guides/prog_guide/img/dmadev.svg
>>   create mode 100644 drivers/dma/meson.build
>>   create mode 100644 lib/dmadev/meson.build
>>   create mode 100644 lib/dmadev/rte_dmadev.c
>>   create mode 100644 lib/dmadev/rte_dmadev.h
>>   create mode 100644 lib/dmadev/rte_dmadev_core.h
>>   create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>>   create mode 100644 lib/dmadev/version.map
>>
> <snip>
> Hi Chengwen,
> I see that the new version removed the "rte_dmadev_get_device_by_name()".
> What is the way to get the dmadev from inside the PMD .remove ? I am
> looking to get the dev_private as we need to do some cleanup
> operations from the remove function.
>
> regards,
> Radha Mohan

Hi Radha,

You can use rte_dma_get_dev_id(name) to get the device ID, which can 
then be used to get the rte_dma_dev struct (which contains dev_private) 
for that device from rte_dma_devices[].

See "idxd_dmadev_destroy()" in 
http://patches.dpdk.org/project/dpdk/patch/20210924133916.4042773-6-kevin.laatz@intel.com/ 
for an example.
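
A rough sketch of that pattern (driver names here are hypothetical, error
handling trimmed, function names as in v23):

	static int
	my_dmadev_destroy(const char *name)
	{
		struct my_dmadev_private *priv;
		int dev_id;

		dev_id = rte_dma_get_dev_id(name);
		if (dev_id < 0)
			return -EINVAL;

		/* dev_private is reachable through the device array */
		priv = rte_dma_devices[dev_id].dev_private;
		/* ... driver-specific cleanup on priv ... */

		return rte_dma_pmd_release(name);
	}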

Hope that helps,

Kevin



^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support Chengwen Feng
@ 2021-10-05 10:16     ` Matan Azrad
  2021-10-08  3:28       ` fengchengwen
  2021-10-06 10:46     ` Thomas Monjalon
  1 sibling, 1 reply; 339+ messages in thread
From: Matan Azrad @ 2021-10-05 10:16 UTC (permalink / raw)
  To: Chengwen Feng, NBU-Contact-Thomas Monjalon, ferruh.yigit,
	bruce.richardson, jerinj, jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

Hi Chengwen

API looks good to me, thanks!

I have some questions below.

> This patch add control plane functions for dmadev.
> 
<snip>
> +/**
> + * DMA transfer direction defines.
> + *
> + * @see struct rte_dma_vchan_conf::direction
> + */
> +enum rte_dma_direction {
> +       /** DMA transfer direction - from memory to memory.
> +        *
> +        * @see struct rte_dma_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_MEM_TO_MEM,
> +       /** DMA transfer direction - from memory to device.
> +        * In a typical scenario, the SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoCs works in
> +        * EP(endpoint) mode, it could initiate a DMA move request from
> +        * memory (which is SoCs memory) to device (which is host memory).
> +        *
> +        * @see struct rte_dma_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_MEM_TO_DEV,


I don't understand precisely the meaning of mem and dev.

What does it mean SoCs memory?

What does it mean host memory?

What is the memory HW in these two types?

How does the user get the addresses of SoCs memory?

How does the user get the addresses of host memory?


Can dpdk app here access physical memory not mapped\allocated to the app?

Matan



> +       /** DMA transfer direction - from device to memory.
> +        * In a typical scenario, the SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoCs works in
> +        * EP(endpoint) mode, it could initiate a DMA move request from device
> +        * (which is host memory) to memory (which is SoCs memory).
> +        *
> +        * @see struct rte_dma_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_DEV_TO_MEM,
> +       /** DMA transfer direction - from device to device.
> +        * In a typical scenario, the SoCs are installed on host servers as
> +        * iNICs through the PCIe interface. In this case, the SoCs works in
> +        * EP(endpoint) mode, it could initiate a DMA move request from device
> +        * (which is host memory) to the device (which is another host
> +        * memory).
> +        *
> +        * @see struct rte_dma_vchan_conf::direction
> +        */
> +       RTE_DMA_DIR_DEV_TO_DEV,
> +};
> +
> +/**
> + * DMA access port type defines.
> + *
> + * @see struct rte_dma_port_param::port_type
> + */
> +enum rte_dma_port_type {
> +       RTE_DMA_PORT_NONE,
> +       RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
> +};
> +
> +/**
> + * A structure used to describe DMA access port parameters.
> + *
> + * @see struct rte_dma_vchan_conf::src_port
> + * @see struct rte_dma_vchan_conf::dst_port
> + */
> +struct rte_dma_port_param {
> +       /** The device access port type.
> +        *
> +        * @see enum rte_dma_port_type
> +        */
> +       enum rte_dma_port_type port_type;
> +       union {
> +               /** PCIe access port parameters.
> +                *
> +                * The following model shows SoC's PCIe module connects to
> +                * multiple PCIe hosts and multiple endpoints. The PCIe module
> +                * has an integrated DMA controller.
> +                *
> +                * If the DMA wants to access the memory of host A, it can be
> +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
> +                *
> +                * \code{.unparsed}
> +                * System Bus
> +                *    |     ----------PCIe module----------
> +                *    |     Bus
> +                *    |     Interface
> +                *    |     -----        ------------------
> +                *    |     |   |        | PCIe Core0     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
> +                *    |     |   |--------|        |- VF-1 |--------| Root    |
> +                *    |     |   |        |   PF-1         |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |   |        | PCIe Core1     |
> +                *    |     |   |        |                |        -----------
> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
> +                *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
> +                *    |     |   |        |        |- VF-1 |        | Complex |
> +                *    |     |   |        |   PF-2         |        -----------
> +                *    |     |   |        ------------------
> +                *    |     |   |
> +                *    |     |   |        ------------------
> +                *    |     |DMA|        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |--------| PCIe Core2     |        ------
> +                *    |     |   |        |                |        ------
> +                *    |     |   |        |                |--------| EP |
> +                *    |     |   |        |                |        ------
> +                *    |     -----        ------------------
> +                *
> +                * \endcode
> +                *
> +                * @note If some fields can not be supported by the
> +                * hardware/driver, then the driver ignores those fields.
> +                * Please check driver-specific documentation for limitations
> +                * and capabilities.
> +                */
> +               struct {
> +                       uint64_t coreid : 4; /**< PCIe core id used. */
> +                       uint64_t pfid : 8; /**< PF id used. */
> +                       uint64_t vfen : 1; /**< VF enable bit. */
> +                       uint64_t vfid : 16; /**< VF id used. */
> +                       /** The pasid field in the TLP packet. */
> +                       uint64_t pasid : 20;
> +                       /** The attributes field in the TLP packet. */
> +                       uint64_t attr : 3;
> +                       /** The processing hint field in the TLP packet. */
> +                       uint64_t ph : 2;
> +                       /** The steering tag field in the TLP packet. */
> +                       uint64_t st : 16;
> +               } pcie;
> +       };
> +       uint64_t reserved[2]; /**< Reserved for future fields. */
> +};
> +
> +/**
> + * A structure used to configure a virtual DMA channel.
> + *
> + * @see rte_dma_vchan_setup
> + */
> +struct rte_dma_vchan_conf {
> +       /** Transfer direction
> +        *
> +        * @see enum rte_dma_direction
> +        */
> +       enum rte_dma_direction direction;
> +       /** Number of descriptor for the virtual DMA channel */
> +       uint16_t nb_desc;
> +       /** 1) Used to describe the device access port parameter in the
> +        * device-to-memory transfer scenario.
> +        * 2) Used to describe the source device access port parameter in the
> +        * device-to-device transfer scenario.
> +        *
> +        * @see struct rte_dma_port_param
> +        */
> +       struct rte_dma_port_param src_port;
> +       /** 1) Used to describe the device access port parameter in the
> +        * memory-to-device transfer scenario.
> +        * 2) Used to describe the destination device access port parameter in
> +        * the device-to-device transfer scenario.
> +        *
> +        * @see struct rte_dma_port_param
> +        */
> +       struct rte_dma_port_param dst_port;
> +};
> +
<snip>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library
  2021-10-05  8:24       ` Kevin Laatz
@ 2021-10-05 16:39         ` Radha Mohan
  0 siblings, 0 replies; 339+ messages in thread
From: Radha Mohan @ 2021-10-05 16:39 UTC (permalink / raw)
  To: Kevin Laatz
  Cc: Chengwen Feng, Thomas Monjalon, ferruh.yigit, bruce.richardson,
	Jerin Jacob Kollanukkaran, Jerin Jacob, andrew.rybchenko,
	dpdk-dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, David Marchand, Satananda Burla, pkapoor,
	konstantin.ananyev, conor.walsh, Radha Chintakuntla

On Tue, Oct 5, 2021 at 1:24 AM Kevin Laatz <kevin.laatz@intel.com> wrote:
>
> On 04/10/2021 22:12, Radha Mohan wrote:
> > On Fri, Sep 24, 2021 at 3:58 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
> >> The 'dmadevice' is a generic type of DMA device.
> >>
> >> This patch introduce the 'dmadevice' device allocation APIs.
> >>
> >> The infrastructure is prepared to welcome drivers in drivers/dma/
> >>
> >> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> >> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> >> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> >> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
> >> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> >> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> >> ---
> >>   MAINTAINERS                            |   5 +
> >>   config/rte_config.h                    |   3 +
> >>   doc/api/doxy-api-index.md              |   1 +
> >>   doc/api/doxy-api.conf.in               |   1 +
> >>   doc/guides/dmadevs/index.rst           |  12 ++
> >>   doc/guides/index.rst                   |   1 +
> >>   doc/guides/prog_guide/dmadev.rst       |  64 ++++++
> >>   doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++++++++
> >>   doc/guides/prog_guide/index.rst        |   1 +
> >>   doc/guides/rel_notes/release_21_11.rst |   4 +
> >>   drivers/dma/meson.build                |   4 +
> >>   drivers/meson.build                    |   1 +
> >>   lib/dmadev/meson.build                 |   7 +
> >>   lib/dmadev/rte_dmadev.c                | 263 +++++++++++++++++++++++
> >>   lib/dmadev/rte_dmadev.h                | 134 ++++++++++++
> >>   lib/dmadev/rte_dmadev_core.h           |  51 +++++
> >>   lib/dmadev/rte_dmadev_pmd.h            |  60 ++++++
> >>   lib/dmadev/version.map                 |  20 ++
> >>   lib/meson.build                        |   1 +
> >>   19 files changed, 916 insertions(+)
> >>   create mode 100644 doc/guides/dmadevs/index.rst
> >>   create mode 100644 doc/guides/prog_guide/dmadev.rst
> >>   create mode 100644 doc/guides/prog_guide/img/dmadev.svg
> >>   create mode 100644 drivers/dma/meson.build
> >>   create mode 100644 lib/dmadev/meson.build
> >>   create mode 100644 lib/dmadev/rte_dmadev.c
> >>   create mode 100644 lib/dmadev/rte_dmadev.h
> >>   create mode 100644 lib/dmadev/rte_dmadev_core.h
> >>   create mode 100644 lib/dmadev/rte_dmadev_pmd.h
> >>   create mode 100644 lib/dmadev/version.map
> >>
> > <snip>
> > Hi Chengwen,
> > I see that the new version removed the "rte_dmadev_get_device_by_name()".
> > What is the way to get the dmadev from inside the PMD .remove ? I am
> > looking to get the dev_private as we need to do some cleanup
> > operations from the remove function.
> >
> > regards,
> > Radha Mohan
>
> Hi Radha,
>
> You can use rte_dma_get_dev_id(name) to get the device ID, which can
> then be used to get the rte_dma_dev struct (which contains dev_private)
> for that device from rte_dma_devices[].
>
> See "idxd_dmadev_destroy()" in
> http://patches.dpdk.org/project/dpdk/patch/20210924133916.4042773-6-kevin.laatz@intel.com/
> for an example.
>
> Hope that helps,

Thanks Kevin. It helped. I wasn't looking at accessing the
rte_dma_devices[] array directly. A library API would've been good.

regards,
Radha
>
> Kevin
>
>

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library Chengwen Feng
  2021-10-04 21:12     ` Radha Mohan
@ 2021-10-06 10:26     ` Thomas Monjalon
  2021-10-08  7:13       ` fengchengwen
  1 sibling, 1 reply; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-06 10:26 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

24/09/2021 12:53, Chengwen Feng:
> The 'dmadevice' is a generic type of DMA device.

Do you mean 'dmadev' ?

> This patch introduce the 'dmadevice' device allocation APIs.
> 
> The infrastructure is prepared to welcome drivers in drivers/dma/

Good

[...]
> +The DMA library provides a DMA device framework for management and provisioning
> +of hardware and software DMA poll mode drivers, defining generic APIs which

We could consider the whole as *one* API.

> +support a number of different DMA operations.
> +
> +
> +Design Principles
> +-----------------
> +
> +The DMA library follows the same basic principles as those used in DPDK's
> +Ethernet Device framework and the RegEx framework.

Not sure what this sentence means. Better to remove.

> The DMA framework provides
> +a generic DMA device framework which supports both physical (hardware)
> +and virtual (software) DMA devices as well as a generic DMA API which allows
> +DMA devices to be managed and configured and supports DMA operations to be
> +provisioned on DMA poll mode driver.

You could split this long sentence.

[...]
> +Physical DMA controllers are discovered during the PCI probe/enumeration of the
> +EAL function which is executed at DPDK initialization, this is based on their
> +PCI BDF (bus/bridge, device, function). Specific physical DMA controllers, like
> +other physical devices in DPDK can be listed using the EAL command line options.
> +
> +The dmadevs are dynamically allocated by using the API

not API, but function

> +``rte_dma_pmd_allocate`` based on the number of hardware DMA channels. After the
> +dmadev initialized successfully, the driver needs to switch the dmadev state to
> +``RTE_DMA_DEV_READY``.

Are we sure we need these details?

> +Device Identification
> +~~~~~~~~~~~~~~~~~~~~~
> +
> +Each DMA device, whether physical or virtual is uniquely designated by two
> +identifiers:
> +
> +- A unique device index used to designate the DMA device in all functions
> +  exported by the DMA API.
> +
> +- A device name used to designate the DMA device in console messages, for
> +  administration or debugging purposes.

Good

[...]
> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -106,6 +106,10 @@ New Features
>    Added command-line options to specify total number of processes and
>    current process ID. Each process owns subset of Rx and Tx queues.
>  
> +* **Introduced dmadev library with:**
> +
> +  * Device allocation APIs.

There is no API for that; it is internal.
From a user perspective, you only need to list outstanding features
in the release notes.

[...]
> +++ b/lib/dmadev/rte_dmadev.c
> +RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
> +#define RTE_DMA_LOG(level, fmt, args...) \
> +	rte_log(RTE_LOG_ ## level, rte_dma_logtype, "%s(): " fmt "\n", \
> +		__func__, ##args)

I don't like having function name in all logs.
I recommend this form of macro:
#define RTE_DMA_LOG(level, ...) \
    rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
        RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))

> +/* Macros to check for valid device id */
> +#define RTE_DMA_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
> +	if (!rte_dma_is_valid(dev_id)) { \
> +		RTE_DMA_LOG(ERR, "Invalid dev_id=%d", dev_id); \
> +		return retval; \
> +	} \
> +} while (0)

I dislike this macro doing a return. It is hiding stuff.
I know we have it in other classes but I think it is a mistake,
we should avoid macro blocks.
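
For example, a small helper with an explicit return at each call site keeps
the control flow visible (just a sketch of the idea, helper name invented):

	static int
	dma_dev_id_check(int16_t dev_id)
	{
		if (!rte_dma_is_valid(dev_id)) {
			RTE_DMA_LOG(ERR, "Invalid dev_id=%d", dev_id);
			return -EINVAL;
		}
		return 0;
	}

and then in each API function:

	ret = dma_dev_id_check(dev_id);
	if (ret != 0)
		return ret;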

> +static int16_t
> +dma_find_free_dev(void)

Actually it is looking for an ID,
so it should be dma_find_free_id.

> +{
> +	int16_t i;
> +
> +	if (rte_dma_devices == NULL)
> +		return -1;
> +
> +	for (i = 0; i < dma_devices_max; i++) {
> +		if (rte_dma_devices[i].dev_name[0] == '\0')

Instead of checking its name, it looks more logical to check the state.

> +			return i;
> +	}
> +
> +	return -1;
> +}
> +
> +static struct rte_dma_dev*
> +dma_find(const char *name)

dma_find_by_name?

[...]
> +++ b/lib/dmadev/rte_dmadev.h
> + * The dmadev are dynamically allocated by rte_dma_pmd_allocate() during the
> + * PCI/SoC device probing phase performed at EAL initialization time. And could
> + * be released by rte_dma_pmd_release() during the PCI/SoC device removing
> + * phase.

I don't think this text has value,
and we could imagine allocating a device at a later stage.

[...]
> + * Configure the maximum number of dmadevs.
> + * @note This function can be invoked before the primary process rte_eal_init()
> + * to change the maximum number of dmadevs.

You should mention what is the default.
Is the default exported to the app in this file?

> + *
> + * @param dev_max
> + *   maximum number of dmadevs.
> + *
> + * @return
> + *   0 on success. Otherwise negative value is returned.
> + */
> +__rte_experimental
> +int rte_dma_dev_max(size_t dev_max);

What about a function able to do more with the name rte_dma_init?
It should allocate the inter-process shared memory,
and do the lookup in case of secondary process.
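
Something along these lines (purely a hypothetical shape, the two helper
names are invented):

	int
	rte_dma_init(int16_t dev_max)
	{
		/* Primary: reserve the shared memzone sized for dev_max
		 * devices; secondary: look the memzone up and attach.
		 */
		if (rte_eal_process_type() == RTE_PROC_PRIMARY)
			return dma_shared_data_create(dev_max);
		return dma_shared_data_lookup();
	}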

> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Get the device identifier for the named DMA device.
> + *
> + * @param name
> + *   DMA device name.
> + *
> + * @return
> + *   Returns DMA device identifier on success.
> + *   - <0: Failure to find named DMA device.
> + */
> +__rte_experimental
> +int rte_dma_get_dev_id(const char *name);

Should we add _by_name?
We could have a function to retrieve the ID by devargs as well.

> +++ b/lib/dmadev/rte_dmadev_core.h
> +/**
> + * @file
> + *
> + * DMA Device internal header.
> + *
> + * This header contains internal data types, that are used by the DMA devices
> + * in order to expose their ops to the class.
> + *
> + * Applications should not use these API directly.

If it is not part of the API, it should not be exposed at all.
Why not have all this stuff in a file dmadev_driver.h?
Is it used by some inline functions?

[...]
> +++ b/lib/dmadev/rte_dmadev_pmd.h
> +/**
> + * @file
> + *
> + * DMA Device PMD APIs
> + *
> + * Driver facing APIs for a DMA device. These are not to be called directly by

You cannot say API for drivers, because API means application interface.
What you mean is "driver interface".

> + * any application.
> + */
[...]
> + * Allocates a new dmadev slot for an DMA device and returns the pointer
> + * to that slot for the driver to use.

Please in all comments, use the infinitive form. Example:
	Allocates -> Allocate

> + *
> + * @param name
> + *   DMA device name.
> + * @param numa_node
> + *   Driver's private data's numa node.

s/numa/NUMA/

> + * @param private_data_size
> + *   Driver's private data size.
> + *
> + * @return
> + *   A pointer to the DMA device slot case of success,
> + *   NULL otherwise.
> + */
> +__rte_internal
> +struct rte_dma_dev *rte_dma_pmd_allocate(const char *name, int numa_node,
> +					 size_t private_data_size);


OK, sorry there are a lot of comments.
Overall, that's good work.
I know you are on holiday; I hope we can finish next week.



^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support
  2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support Chengwen Feng
  2021-10-05 10:16     ` Matan Azrad
@ 2021-10-06 10:46     ` Thomas Monjalon
  2021-10-08  7:55       ` fengchengwen
  1 sibling, 1 reply; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-06 10:46 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

24/09/2021 12:53, Chengwen Feng:
> --- a/doc/guides/prog_guide/dmadev.rst
> +++ b/doc/guides/prog_guide/dmadev.rst
> @@ -62,3 +62,44 @@ identifiers:
>  
>  - A device name used to designate the DMA device in console messages, for
>    administration or debugging purposes.
> +
> +
> +Device Configuration
> +~~~~~~~~~~~~~~~~~~~~
> +
> +The rte_dma_configure API is used to configure a DMA device.
> +
> +.. code-block:: c
> +
> +   int rte_dma_configure(int16_t dev_id,
> +                         const struct rte_dma_conf *dev_conf);
> +
> +The ``rte_dma_conf`` structure is used to pass the configuration parameters
> +for the DMA device for example the number of virtual DMA channels to set up,
> +indication of whether to enable silent mode.
> +
> +
> +Configuration of Virtual DMA Channels
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +The rte_dma_vchan_setup API is used to configure a virtual DMA channel.
> +
> +.. code-block:: c
> +
> +   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
> +                           const struct rte_dma_vchan_conf *conf);
> +
> +The ``rte_dma_vchan_conf`` structure is used to pass the configuration
> +parameters for the virtual DMA channel for example transfer direction, number of
> +descriptor for the virtual DMA channel, source device access port parameter,
> +destination device access port parameter.

You should avoid being redundant with the Doxygen documentation.
In the guide, it should only explain the concepts, not the details.
For the details of each function, we refer to Doxygen.


> --- a/lib/dmadev/rte_dmadev.c
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -218,6 +218,9 @@ rte_dma_pmd_release(const char *name)
>  	if (dev == NULL)
>  		return -EINVAL;
>  
> +	if (dev->state == RTE_DMA_DEV_READY)
> +		return rte_dma_close(dev->dev_id);

What is the logic here?
The only exposed function should be rte_dma_close()
and it should call the freeing function.
The API should use the dev_id. As you said somewhere else,
the name is only for debugging.
Please remove the function rte_dma_pmd_release(const char *name).

[...]
> --- a/lib/dmadev/rte_dmadev.h
> +++ b/lib/dmadev/rte_dmadev.h
> + * The functions exported by the dmadev API to setup a device designated by its
> + * device identifier must be invoked in the following order:
> + *     - rte_dma_configure()
> + *     - rte_dma_vchan_setup()
> + *     - rte_dma_start()
> + *
> + * If the application wants to change the configuration (i.e. invoke
> + * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
> + * rte_dma_stop() first to stop the device and then do the reconfiguration
> + * before invoking rte_dma_start() again. The dataplane functions should not
> + * be invoked when the device is stopped.
> + *
> + * Finally, an application can close a dmadev by invoking the rte_dma_close()
> + * function.

Yes rte_dma_close, not rte_dma_pmd_release.

> + *
> + * About MT-safe, all the functions of the dmadev API exported by a PMD are

API is not exported by a PMD, but implemented.

> + * lock-free functions which assume to not be invoked in parallel on different
> + * logical cores to work on the same target dmadev object.
> + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
> + * parallel invocation because these virtual DMA channels share the same
> + * HW-DMA-channel.
> + *
>   */

No need of final blank line in a comment.

> +/** DMA device support memory-to-memory transfer.
> + *
> + * @see struct rte_dma_info::dev_capa
> + */
> +#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
> +/** DMA device support memory-to-device transfer.
> + *
> + * @see struct rte_dma_info::dev_capa
> + */
> +#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)

Same comment as in earlier version: please group the flags
in a doxygen group. Example of doxygen group:
https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/
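
i.e. something like (sketch):

/**@{@name DMA capability flags
 * Flags used in struct rte_dma_info::dev_capa.
 */
/** DMA device support memory-to-memory transfer. */
#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
/** DMA device support memory-to-device transfer. */
#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
/**@}*/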

[...]
You are using uint64_t bitfields and an anonymous union in the struct below;
it may not compile without __extension__ from RTE_STD_C11.

> +struct rte_dma_port_param {
> +	/** The device access port type.
> +	 *
> +	 * @see enum rte_dma_port_type
> +	 */
> +	enum rte_dma_port_type port_type;
> +	union {
[...]
> +		struct {
> +			uint64_t coreid : 4; /**< PCIe core id used. */
> +			uint64_t pfid : 8; /**< PF id used. */
> +			uint64_t vfen : 1; /**< VF enable bit. */
> +			uint64_t vfid : 16; /**< VF id used. */
> +			/** The pasid filed in TLP packet. */
> +			uint64_t pasid : 20;
> +			/** The attributes filed in TLP packet. */
> +			uint64_t attr : 3;
> +			/** The processing hint filed in TLP packet. */
> +			uint64_t ph : 2;
> +			/** The steering tag filed in TLP packet. */
> +			uint64_t st : 16;
> +		} pcie;
> +	};
> +	uint64_t reserved[2]; /**< Reserved for future fields. */
> +};
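
Concretely, the anonymous union above would need the marker, e.g. (a sketch,
PCIe fields abbreviated):

	struct rte_dma_port_param {
		enum rte_dma_port_type port_type;
		RTE_STD_C11
		union {
			struct {
				uint64_t coreid : 4;
				/* ... remaining PCIe TLP fields ... */
			} pcie;
		};
		uint64_t reserved[2];
	};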

> --- a/lib/dmadev/rte_dmadev_core.h
> +++ b/lib/dmadev/rte_dmadev_core.h
> +/** @internal Used to get device information of a device. */
> +typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
> +				  struct rte_dma_info *dev_info,
> +				  uint32_t info_sz);

Please move all driver interfaces in a file dedicated to drivers.

[...]
> @@ -40,9 +96,13 @@ struct rte_dma_dev {
>  	int16_t dev_id; /**< Device [external] identifier. */
>  	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
>  	void *dev_private; /**< PMD-specific private data. */
> +	/** Functions exported by PMD. */

s/exported/implemented/

> +	const struct rte_dma_dev_ops *dev_ops;
> +	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
>  	/** Device info which supplied during device initialization. */
>  	struct rte_device *device;
>  	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
> +	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
>  	uint64_t reserved[2]; /**< Reserved for future fields. */
>  } __rte_cache_aligned;




^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library
  2021-10-04 21:12     ` Radha Mohan
  2021-10-05  8:24       ` Kevin Laatz
@ 2021-10-08  1:52       ` fengchengwen
  1 sibling, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-10-08  1:52 UTC (permalink / raw)
  To: Radha Mohan
  Cc: Thomas Monjalon, ferruh.yigit, bruce.richardson,
	Jerin Jacob Kollanukkaran, Jerin Jacob, andrew.rybchenko,
	dpdk-dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, David Marchand, Satananda Burla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz, Radha Chintakuntla

On 2021/10/5 5:12, Radha Mohan wrote:
> On Fri, Sep 24, 2021 at 3:58 AM Chengwen Feng <fengchengwen@huawei.com> wrote:
>>
>> The 'dmadevice' is a generic type of DMA device.
>>
>> This patch introduce the 'dmadevice' device allocation APIs.
>>
>> The infrastructure is prepared to welcome drivers in drivers/dma/
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>> Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
>> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
>> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
>> ---
>>  MAINTAINERS                            |   5 +
>>  config/rte_config.h                    |   3 +
>>  doc/api/doxy-api-index.md              |   1 +
>>  doc/api/doxy-api.conf.in               |   1 +
>>  doc/guides/dmadevs/index.rst           |  12 ++
>>  doc/guides/index.rst                   |   1 +
>>  doc/guides/prog_guide/dmadev.rst       |  64 ++++++
>>  doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++++++++
>>  doc/guides/prog_guide/index.rst        |   1 +
>>  doc/guides/rel_notes/release_21_11.rst |   4 +
>>  drivers/dma/meson.build                |   4 +
>>  drivers/meson.build                    |   1 +
>>  lib/dmadev/meson.build                 |   7 +
>>  lib/dmadev/rte_dmadev.c                | 263 +++++++++++++++++++++++
>>  lib/dmadev/rte_dmadev.h                | 134 ++++++++++++
>>  lib/dmadev/rte_dmadev_core.h           |  51 +++++
>>  lib/dmadev/rte_dmadev_pmd.h            |  60 ++++++
>>  lib/dmadev/version.map                 |  20 ++
>>  lib/meson.build                        |   1 +
>>  19 files changed, 916 insertions(+)
>>  create mode 100644 doc/guides/dmadevs/index.rst
>>  create mode 100644 doc/guides/prog_guide/dmadev.rst
>>  create mode 100644 doc/guides/prog_guide/img/dmadev.svg
>>  create mode 100644 drivers/dma/meson.build
>>  create mode 100644 lib/dmadev/meson.build
>>  create mode 100644 lib/dmadev/rte_dmadev.c
>>  create mode 100644 lib/dmadev/rte_dmadev.h
>>  create mode 100644 lib/dmadev/rte_dmadev_core.h
>>  create mode 100644 lib/dmadev/rte_dmadev_pmd.h
>>  create mode 100644 lib/dmadev/version.map
>>
> <snip>
> Hi Chengwen,
> I see that the new version removed the "rte_dmadev_get_device_by_name()".
> What is the way to get the dmadev from inside the PMD .remove ? I am
> looking to get the dev_private as we need to do some cleanup
> operations from the remove function.

Hi Radha,

The PMD should invoke rte_dma_pmd_release() from its .remove callback;
rte_dma_pmd_release() will call the dev_close ops, so the PMD can do its
cleanup operations there.
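
In other words, roughly (a sketch only, assuming the v23 ops signature;
driver names are hypothetical):

	static int
	my_dma_close(struct rte_dma_dev *dev)
	{
		struct my_priv *priv = dev->dev_private;

		/* release the resources kept in dev_private */
		rte_free(priv->desc_ring);
		return 0;
	}

	static const struct rte_dma_dev_ops my_ops = {
		.dev_close = my_dma_close,
	};

The PCI/vdev .remove callback then just calls rte_dma_pmd_release(name),
which invokes dev_close before freeing the dmadev slot.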

Thanks

> 
> regards,
> Radha Mohan
> 
> .
> 


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support
  2021-10-05 10:16     ` Matan Azrad
@ 2021-10-08  3:28       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-10-08  3:28 UTC (permalink / raw)
  To: Matan Azrad, NBU-Contact-Thomas Monjalon, ferruh.yigit,
	bruce.richardson, jerinj, jerinjacobk, andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

On 2021/10/5 18:16, Matan Azrad wrote:
> Hi Chengwen
> 
> API looks good to me, thanks!
> 
> I have some questions below.
> 
>> This patch add control plane functions for dmadev.
>>
> <snip>
>> +/**
>> + * DMA transfer direction defines.
>> + *
>> + * @see struct rte_dma_vchan_conf::direction
>> + */
>> +enum rte_dma_direction {
>> +       /** DMA transfer direction - from memory to memory.
>> +        *
>> +        * @see struct rte_dma_vchan_conf::direction
>> +        */
>> +       RTE_DMA_DIR_MEM_TO_MEM,
>> +       /** DMA transfer direction - from memory to device.
>> +        * In a typical scenario, the SoCs are installed on host servers as
>> +        * iNICs through the PCIe interface. In this case, the SoCs works in
>> +        * EP(endpoint) mode, it could initiate a DMA move request from
>> +        * memory (which is SoCs memory) to device (which is host memory).
>> +        *
>> +        * @see struct rte_dma_vchan_conf::direction
>> +        */
>> +       RTE_DMA_DIR_MEM_TO_DEV,
> 
> 
> I don't understand precisely the meaning of mem and dev.
> 
> What does it mean SoCs memory?
> 
> What does it mean host memory?
> 
> What is the memory HW in these two types?
> 
> How does the user get the addresses of SoCs memory?
> 
> How does the user get the addresses of host memory?
> 

Hi Matan,

	System Bus
	    |     ----------PCIe module----------
	    |     Bus
	    |     Interface
	    |     -----        ------------------
	    |     |   |        | PCIe Core0     |
 DDR3 ------|     |   |        |                |        -----------
	    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
	    |     |   |--------|        |- VF-1 |--------| Root    | ---DDR1
	    |     |   |        |   PF-1         |        | Complex |
	    |     |   |        |   PF-2         |        -----------
	    |     |   |        ------------------
	    |     |DMA|
	    |     |   |        ------------------
	    |     |   |        | PCIe Core1     |
	    |     |   |        |                |        -----------
	    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
	    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    | ---DDR2
	    |     |   |        |        |- VF-1 |        | Complex |
	    |     |   |        |   PF-2         |        -----------
	    |     |   |        ------------------
	    |     -----

---------------------SOC-------------------------        -------HOST--------

As shown in the above figure, the SOC is connected to two hosts.
The DDR3 is the SOC's memory, the DDR1 is HOST-A's memory, and DDR2 is HOST-B's memory.

To access DDR3 memory, the app can mmap it and pass its IOVA address to the DMA.
To access DDR1/2 memory, some devices use a parameterized descriptor which contains
the function from which the request is sent and the destination address. In this
case, it is up to the user to determine the function and destination address,
which are passed in as parameters.
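
For example, to let the DMA engine read host memory (DEV_TO_MEM) through PF-1
of PCIe Core0, the app would describe the access port in the vchan
configuration, roughly like this (values are only illustrative):

	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_DEV_TO_MEM,
		.nb_desc = 128,
		/* src_port describes the device side in DEV_TO_MEM */
		.src_port = {
			.port_type = RTE_DMA_PORT_PCIE,
			.pcie = { .coreid = 0, .pfid = 1 },
		},
	};
	ret = rte_dma_vchan_setup(dev_id, 0, &conf);

The copy itself then takes the host-side address as the source and a local
IOVA as the destination.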

Thanks.

> 
> Can dpdk app here access physical memory not mapped\allocated to the app?
> 
> Matan
> 
> 
> 
>> +       /** DMA transfer direction - from device to memory.
>> +        * In a typical scenario, the SoCs are installed on host servers as
>> +        * iNICs through the PCIe interface. In this case, the SoCs works in
>> +        * EP(endpoint) mode, it could initiate a DMA move request from device
>> +        * (which is host memory) to memory (which is SoCs memory).
>> +        *
>> +        * @see struct rte_dma_vchan_conf::direction
>> +        */
>> +       RTE_DMA_DIR_DEV_TO_MEM,
>> +       /** DMA transfer direction - from device to device.
>> +        * In a typical scenario, the SoCs are installed on host servers as
>> +        * iNICs through the PCIe interface. In this case, the SoCs works in
>> +        * EP(endpoint) mode, it could initiate a DMA move request from device
>> +        * (which is host memory) to the device (which is another host
>> +        * memory).
>> +        *
>> +        * @see struct rte_dma_vchan_conf::direction
>> +        */
>> +       RTE_DMA_DIR_DEV_TO_DEV,
>> +};
>> +
>> +/**
>> + * DMA access port type defines.
>> + *
>> + * @see struct rte_dma_port_param::port_type
>> + */
>> +enum rte_dma_port_type {
>> +       RTE_DMA_PORT_NONE,
>> +       RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
>> +};
>> +
>> +/**
>> + * A structure used to describe DMA access port parameters.
>> + *
>> + * @see struct rte_dma_vchan_conf::src_port
>> + * @see struct rte_dma_vchan_conf::dst_port
>> + */
>> +struct rte_dma_port_param {
>> +       /** The device access port type.
>> +        *
>> +        * @see enum rte_dma_port_type
>> +        */
>> +       enum rte_dma_port_type port_type;
>> +       union {
>> +               /** PCIe access port parameters.
>> +                *
>> +                * The following model shows SoC's PCIe module connects to
>> +                * multiple PCIe hosts and multiple endpoints. The PCIe module
>> +                * has an integrated DMA controller.
>> +                *
>> +                * If the DMA wants to access the memory of host A, it can be
>> +                * initiated by PF1 in core0, or by VF0 of PF0 in core0.
>> +                *
>> +                * \code{.unparsed}
>> +                * System Bus
>> +                *    |     ----------PCIe module----------
>> +                *    |     Bus
>> +                *    |     Interface
>> +                *    |     -----        ------------------
>> +                *    |     |   |        | PCIe Core0     |
>> +                *    |     |   |        |                |        -----------
>> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
>> +                *    |     |   |--------|        |- VF-1 |--------| Root    |
>> +                *    |     |   |        |   PF-1         |        | Complex |
>> +                *    |     |   |        |   PF-2         |        -----------
>> +                *    |     |   |        ------------------
>> +                *    |     |   |
>> +                *    |     |   |        ------------------
>> +                *    |     |   |        | PCIe Core1     |
>> +                *    |     |   |        |                |        -----------
>> +                *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
>> +                *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
>> +                *    |     |   |        |        |- VF-1 |        | Complex |
>> +                *    |     |   |        |   PF-2         |        -----------
>> +                *    |     |   |        ------------------
>> +                *    |     |   |
>> +                *    |     |   |        ------------------
>> +                *    |     |DMA|        |                |        ------
>> +                *    |     |   |        |                |--------| EP |
>> +                *    |     |   |--------| PCIe Core2     |        ------
>> +                *    |     |   |        |                |        ------
>> +                *    |     |   |        |                |--------| EP |
>> +                *    |     |   |        |                |        ------
>> +                *    |     -----        ------------------
>> +                *
>> +                * \endcode
>> +                *
>> +                * @note If some fields can not be supported by the
>> +                * hardware/driver, then the driver ignores those fields.
>> +                * Please check driver-specific documentation for limitations
>> +                * and capabilities.
>> +                */
>> +               struct {
>> +                       uint64_t coreid : 4; /**< PCIe core id used. */
>> +                       uint64_t pfid : 8; /**< PF id used. */
>> +                       uint64_t vfen : 1; /**< VF enable bit. */
>> +                       uint64_t vfid : 16; /**< VF id used. */
>> +                       /** The PASID field in the TLP packet. */
>> +                       uint64_t pasid : 20;
>> +                       /** The attributes field in the TLP packet. */
>> +                       uint64_t attr : 3;
>> +                       /** The processing hint field in the TLP packet. */
>> +                       uint64_t ph : 2;
>> +                       /** The steering tag field in the TLP packet. */
>> +                       uint64_t st : 16;
>> +               } pcie;
>> +       };
>> +       uint64_t reserved[2]; /**< Reserved for future fields. */
>> +};
>> +
>> +/**
>> + * A structure used to configure a virtual DMA channel.
>> + *
>> + * @see rte_dma_vchan_setup
>> + */
>> +struct rte_dma_vchan_conf {
>> +       /** Transfer direction
>> +        *
>> +        * @see enum rte_dma_direction
>> +        */
>> +       enum rte_dma_direction direction;
>> +       /** Number of descriptors for the virtual DMA channel */
>> +       uint16_t nb_desc;
>> +       /** 1) Used to describe the device access port parameter in the
>> +        * device-to-memory transfer scenario.
>> +        * 2) Used to describe the source device access port parameter in the
>> +        * device-to-device transfer scenario.
>> +        *
>> +        * @see struct rte_dma_port_param
>> +        */
>> +       struct rte_dma_port_param src_port;
>> +       /** 1) Used to describe the device access port parameter in the
>> +        * memory-to-device transfer scenario.
>> +        * 2) Used to describe the destination device access port parameter in
>> +        * the device-to-device transfer scenario.
>> +        *
>> +        * @see struct rte_dma_port_param
>> +        */
>> +       struct rte_dma_port_param dst_port;
>> +};
>> +
> <snip>
> 
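
As an illustration, filling these structures for a memory-to-device
transfer over PCIe could look roughly like the sketch below (a sketch
only; the PCIe field values are made up and error handling is omitted):

	#include <string.h>
	#include <rte_dmadev.h>

	static int
	setup_mem_to_dev_vchan(int16_t dev_id, uint16_t vchan)
	{
		struct rte_dma_vchan_conf conf;

		memset(&conf, 0, sizeof(conf));
		conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
		conf.nb_desc = 1024; /* ring size, device dependent */

		/* The destination is reached through the PCIe access port. */
		conf.dst_port.port_type = RTE_DMA_PORT_PCIE;
		conf.dst_port.pcie.coreid = 0; /* PCIe core 0 */
		conf.dst_port.pcie.pfid = 1;   /* initiate via PF-1 */
		conf.dst_port.pcie.vfen = 0;   /* no VF used */

		return rte_dma_vchan_setup(dev_id, vchan, &conf);
	}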


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library
  2021-10-06 10:26     ` Thomas Monjalon
@ 2021-10-08  7:13       ` fengchengwen
  2021-10-08 10:09         ` Thomas Monjalon
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-10-08  7:13 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

On 2021/10/6 18:26, Thomas Monjalon wrote:
> 24/09/2021 12:53, Chengwen Feng:
>> The 'dmadevice' is a generic type of DMA device.
> 
> Do you mean 'dmadev' ?
> 
>> This patch introduce the 'dmadevice' device allocation APIs.
>>
>> The infrastructure is prepared to welcome drivers in drivers/dma/
> 
> Good
> 
> [...]
>> +The DMA library provides a DMA device framework for management and provisioning
>> +of hardware and software DMA poll mode drivers, defining generic APIs which

[snip]

> 
> [...]
>> +++ b/lib/dmadev/rte_dmadev.h
>> + * The dmadev are dynamically allocated by rte_dma_pmd_allocate() during the
>> + * PCI/SoC device probing phase performed at EAL initialization time. And could
>> + * be released by rte_dma_pmd_release() during the PCI/SoC device removing
>> + * phase.
> 
> I don't think this text has value,
> and we could imagine allocating a device at a later stage.

Yes, we could remove the description of the probing stage because it is well-known, but I
recommend keeping the rte_dma_pmd_allocate and rte_dma_pmd_release functions. How about:

 * A dmadev is dynamically allocated by rte_dma_pmd_allocate() and can
 * be released by rte_dma_pmd_release().

> 
> [...]
>> + * Configure the maximum number of dmadevs.
>> + * @note This function can be invoked before the primary process rte_eal_init()
>> + * to change the maximum number of dmadevs.
> 
> You should mention what is the default.
> Is the default exported to the app in this file?

The default macro is RTE_DMADEV_DEFAULT_MAX_DEVS, and I placed it in rte_config.h.

I think it's better to keep the configuration in one place (rte_config.h) than to modify it in multiple places (e.g. rte_dmadev.h/rte_xxx.h).

> 
>> + *
>> + * @param dev_max
>> + *   maximum number of dmadevs.
>> + *
>> + * @return
>> + *   0 on success. Otherwise negative value is returned.
>> + */
>> +__rte_experimental
>> +int rte_dma_dev_max(size_t dev_max);
> 
> What about a function able to do more with the name rte_dma_init?
> It should allocate the inter-process shared memory,
> and do the lookup in case of secondary process.

Yes, we defined dma_data_prepare(), which does the above; it's in the 4th patch.

Because operations like allocating inter-process shared memory cannot be invoked before
rte_eal_init(), I think it's better to keep rte_dma_dev_max as it is.
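
For example, an application that wants more dmadevs than the default
would just do something like this before EAL init (minimal sketch):

	#include <rte_eal.h>
	#include <rte_dmadev.h>

	int
	main(int argc, char **argv)
	{
		/* Must run before rte_eal_init(): it only records the
		 * limit, so it cannot rely on any EAL service yet.
		 */
		if (rte_dma_dev_max(128) != 0)
			return -1;

		if (rte_eal_init(argc, argv) < 0)
			return -1;

		/* ... */
		return 0;
	}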

> 
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change without prior notice.
>> + *
>> + * Get the device identifier for the named DMA device.
>> + *
>> + * @param name
>> + *   DMA device name.
>> + *
>> + * @return
>> + *   Returns DMA device identifier on success.
>> + *   - <0: Failure to find named DMA device.
>> + */
>> +__rte_experimental
>> +int rte_dma_get_dev_id(const char *name);
> 
> Should we add _by_name?
> We could have a function to retrieve the ID by devargs as well.
> 
>> +++ b/lib/dmadev/rte_dmadev_core.h
>> +/**
>> + * @file
>> + *
>> + * DMA Device internal header.
>> + *
>> + * This header contains internal data types that are used by the DMA devices
>> + * in order to expose their ops to the class.
>> + *
>> + * Applications should not use these APIs directly.
> 
> If it is not part of the API, it should not be exposed at all.
> Why not having all these stuff in a file dmadev_driver.h?
> Is it used by some inline functions?

Yes, it's used by dataplane inline functions.
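
E.g. something like the following lives in rte_dmadev.h and is compiled
inline into the application, so the structure layout from
rte_dmadev_core.h must be visible to it (a rough sketch only, not the
exact upstream code):

	static inline int
	rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src,
		     rte_iova_t dst, uint32_t length, uint64_t flags)
	{
		struct rte_dma_dev *dev = &rte_dma_devices[dev_id];

		/* Direct call through the device's function pointer,
		 * with no extra indirection on the fast path.
		 */
		return (*dev->copy)(dev, vchan, src, dst, length, flags);
	}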

[snip]

> .
> 

Thanks


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support
  2021-10-06 10:46     ` Thomas Monjalon
@ 2021-10-08  7:55       ` fengchengwen
  2021-10-08 10:18         ` Thomas Monjalon
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-10-08  7:55 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

On 2021/10/6 18:46, Thomas Monjalon wrote:
> 24/09/2021 12:53, Chengwen Feng:
>> --- a/doc/guides/prog_guide/dmadev.rst
>> +++ b/doc/guides/prog_guide/dmadev.rst
>> @@ -62,3 +62,44 @@ identifiers:
>>  
>>  - A device name used to designate the DMA device in console messages, for
>>    administration or debugging purposes.

[snip]

> 
>> --- a/lib/dmadev/rte_dmadev.c
>> +++ b/lib/dmadev/rte_dmadev.c
>> @@ -218,6 +218,9 @@ rte_dma_pmd_release(const char *name)
>>  	if (dev == NULL)
>>  		return -EINVAL;
>>  
>> +	if (dev->state == RTE_DMA_DEV_READY)
>> +		return rte_dma_close(dev->dev_id);
> 
> What is the logic here?
> The only exposed function should be rte_dma_close()
> and it should call the freeing function.
> The API should use the dev_id. As you said somewhere else,
> the name is only for debugging.
> Please remove the function rte_dma_pmd_release(const char *name).

rte_dma_pmd_release corresponds to rte_dma_pmd_allocate, so both use the 'const char *name' parameter.

rte_dma_pmd_release is also used for error handling during PMD init, which is why a state
variable is checked here. If the device is not ready, only its resources need to be released;
otherwise, the close interface of the driver is invoked.

For a PMD, rte_dma_pmd_release is just a wrapper around dev_close when removing a device, so
the driver does not need to implement two callbacks.

If we replace rte_dma_pmd_release with rte_dma_close, then we would have to invoke rte_dma_close
in the error handling of PMD init, which leads to a conceptual inconsistency because the
initialization has not been successful.

So I think it's better to keep rte_dma_pmd_release.
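
I.e. the probe path of a driver would look roughly like this (a sketch;
skeldma_init_hw() and struct skeldma_hw are placeholder names):

	static int
	skeldma_probe(struct rte_vdev_device *vdev)
	{
		const char *name = rte_vdev_device_name(vdev);
		struct rte_dma_dev *dev;

		dev = rte_dma_pmd_allocate(name, rte_socket_id(),
					   sizeof(struct skeldma_hw));
		if (dev == NULL)
			return -ENOMEM;

		if (skeldma_init_hw(dev) != 0) {
			/* Init failed: the device never reached READY,
			 * so this only frees resources and does not
			 * invoke the driver's close callback.
			 */
			rte_dma_pmd_release(name);
			return -EIO;
		}

		dev->state = RTE_DMA_DEV_READY;
		return 0;
	}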

> 
> [...]
>> --- a/lib/dmadev/rte_dmadev.h
>> +++ b/lib/dmadev/rte_dmadev.h
>> + * The functions exported by the dmadev API to setup a device designated by its
>> + * device identifier must be invoked in the following order:
>> + *     - rte_dma_configure()
>> + *     - rte_dma_vchan_setup()
>> + *     - rte_dma_start()
>> + *
>> + * If the application wants to change the configuration (i.e. invoke
>> + * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
>> + * rte_dma_stop() first to stop the device and then do the reconfiguration
>> + * before invoking rte_dma_start() again. The dataplane functions should not
>> + * be invoked when the device is stopped.
>> + *
>> + * Finally, an application can close a dmadev by invoking the rte_dma_close()
>> + * function.
> 
> Yes rte_dma_close, not rte_dma_pmd_release.
> 
>> + *
>> + * About MT-safe, all the functions of the dmadev API exported by a PMD are
> 
> API is not exported by a PMD, but implemented.
> 
>> + * lock-free functions which are assumed not to be invoked in parallel on
>> + * different logical cores to work on the same target dmadev object.
>> + * @note Different virtual DMA channels on the same dmadev *DO NOT* support
>> + * parallel invocation because these virtual DMA channels share the same
>> + * HW-DMA-channel.
>> + *
>>   */
> 
> No need of final blank line in a comment.
> 
>> +/** DMA device supports memory-to-memory transfer.
>> + *
>> + * @see struct rte_dma_info::dev_capa
>> + */
>> +#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
>> +/** DMA device supports memory-to-device transfer.
>> + *
>> + * @see struct rte_dma_info::dev_capa
>> + */
>> +#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
> 
> Same comment as in earlier version: please group the flags
> in a doxygen group. Example of doxygen group:
> https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/

I tried, but found it didn't coexist well with multi-line comments.

> 
> [...]
> You are using uint64_t bitfields and anonymous union in below struct,
> it may not compile if not using __extension__ from RTE_STD_C11.
> 
>> +struct rte_dma_port_param {
>> +	/** The device access port type.
>> +	 *
>> +	 * @see enum rte_dma_port_type
>> +	 */
>> +	enum rte_dma_port_type port_type;
>> +	union {
> [...]
>> +		struct {
>> +			uint64_t coreid : 4; /**< PCIe core id used. */
>> +			uint64_t pfid : 8; /**< PF id used. */
>> +			uint64_t vfen : 1; /**< VF enable bit. */
>> +			uint64_t vfid : 16; /**< VF id used. */
>> +			/** The PASID field in the TLP packet. */
>> +			uint64_t pasid : 20;
>> +			/** The attributes field in the TLP packet. */
>> +			uint64_t attr : 3;
>> +			/** The processing hint field in the TLP packet. */
>> +			uint64_t ph : 2;
>> +			/** The steering tag field in the TLP packet. */
>> +			uint64_t st : 16;
>> +		} pcie;
>> +	};
>> +	uint64_t reserved[2]; /**< Reserved for future fields. */
>> +};
> 
>> --- a/lib/dmadev/rte_dmadev_core.h
>> +++ b/lib/dmadev/rte_dmadev_core.h
>> +/** @internal Used to get device information of a device. */
>> +typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
>> +				  struct rte_dma_info *dev_info,
>> +				  uint32_t info_sz);
> 
> Please move all driver interfaces in a file dedicated to drivers.

There are three header files: rte_dmadev.h, rte_dmadev_core.h and rte_dmadev_pmd.h.
And we build the following dependency:

                rte_dmadev.h   ---> rte_dmadev_core.h          // mainly because dataplane inline API.
                    ^
                    |
           ---------------------
           |                   |
       Application       rte_dmadev_pmd.h
                               ^
                               |
                             DMA PMD


If we move all driver interfaces from rte_dmadev_core.h to rte_dmadev_pmd.h, a bidirectional
dependency may arise, e.g.

                rte_dmadev.h   ---> rte_dmadev_core.h  ---> rte_dmadev_pmd.h
                    ^
                    |
           ---------------------
           |                   |
       Application       rte_dmadev_pmd.h
                               ^
                               |
                             DMA PMD

So I think it's better to keep it that way.
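
In other words, each consumer includes only its side of the graph:

	/* Application code: only the public header; it pulls in
	 * rte_dmadev_core.h itself for the dataplane inline functions.
	 */
	#include <rte_dmadev.h>

	/* DMA PMD code: the driver header, which in turn includes
	 * rte_dmadev.h.
	 */
	#include <rte_dmadev_pmd.h>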

> 
> [...]
>> @@ -40,9 +96,13 @@ struct rte_dma_dev {
>>  	int16_t dev_id; /**< Device [external] identifier. */
>>  	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
>>  	void *dev_private; /**< PMD-specific private data. */
>> +	/** Functions exported by PMD. */
> 
> s/exported/implemented/
> 
>> +	const struct rte_dma_dev_ops *dev_ops;
>> +	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
>>  	/** Device info supplied during device initialization. */
>>  	struct rte_device *device;
>>  	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
>> +	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
>>  	uint64_t reserved[2]; /**< Reserved for future fields. */
>>  } __rte_cache_aligned;
> 
> 
> 
> 
> .
> 

Thanks


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library
  2021-10-08  7:13       ` fengchengwen
@ 2021-10-08 10:09         ` Thomas Monjalon
  0 siblings, 0 replies; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-08 10:09 UTC (permalink / raw)
  To: fengchengwen
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

08/10/2021 09:13, fengchengwen:
> On 2021/10/6 18:26, Thomas Monjalon wrote:
> > 24/09/2021 12:53, Chengwen Feng:
> >> +++ b/lib/dmadev/rte_dmadev.h
> >> + * The dmadev are dynamically allocated by rte_dma_pmd_allocate() during the
> >> + * PCI/SoC device probing phase performed at EAL initialization time. And could
> >> + * be released by rte_dma_pmd_release() during the PCI/SoC device removing
> >> + * phase.
> > 
> > I don't think this text has value,
> > and we could imagine allocating a device at a later stage.
> 
> Yes, we could remove the description of the probing stage because it is well-known, but I
> recommend keeping the rte_dma_pmd_allocate and rte_dma_pmd_release functions. How about:
> 
>  * A dmadev is dynamically allocated by rte_dma_pmd_allocate() and can
>  * be released by rte_dma_pmd_release().

These functions are for PMD.
This file is for applications, so it is not appropriate.

> > [...]
> >> + * Configure the maximum number of dmadevs.
> >> + * @note This function can be invoked before the primary process rte_eal_init()
> >> + * to change the maximum number of dmadevs.
> > 
> > You should mention what is the default.
> > Is the default exported to the app in this file?
> 
> The default macro is RTE_DMADEV_DEFAULT_MAX_DEVS, and I placed it in rte_config.h.

No, we avoid adding things in rte_config.h.
There should be a static default which can be changed at runtime only.

> I think it's better to keep the configuration in one place (rte_config.h) than to modify it in multiple places (e.g. rte_dmadev.h/rte_xxx.h).

Config is modified only in one place: the function.

> >> + *
> >> + * @param dev_max
> >> + *   maximum number of dmadevs.
> >> + *
> >> + * @return
> >> + *   0 on success. Otherwise negative value is returned.
> >> + */
> >> +__rte_experimental
> >> +int rte_dma_dev_max(size_t dev_max);
> > 
> > What about a function able to do more with the name rte_dma_init?
> > It should allocate the inter-process shared memory,
> > and do the lookup in case of secondary process.
> 
> Yes, we defined dma_data_prepare(), which does the above; it's in the 4th patch.
> 
> Because operations like allocating inter-process shared memory cannot be invoked before
> rte_eal_init(), I think it's better to keep rte_dma_dev_max as it is.

Good point.

> >> +++ b/lib/dmadev/rte_dmadev_core.h
> >> +/**
> >> + * @file
> >> + *
> >> + * DMA Device internal header.
> >> + *
> >> + * This header contains internal data types that are used by the DMA devices
> >> + * in order to expose their ops to the class.
> >> + *
> >> + * Applications should not use these APIs directly.
> > 
> > If it is not part of the API, it should not be exposed at all.
> > Why not having all these stuff in a file dmadev_driver.h?
> > Is it used by some inline functions?
> 
> Yes, it's used by dataplane inline functions.

OK, please give this reason in the description.



^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support
  2021-10-08  7:55       ` fengchengwen
@ 2021-10-08 10:18         ` Thomas Monjalon
  0 siblings, 0 replies; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-08 10:18 UTC (permalink / raw)
  To: fengchengwen
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

08/10/2021 09:55, fengchengwen:
> On 2021/10/6 18:46, Thomas Monjalon wrote:
> > 24/09/2021 12:53, Chengwen Feng:
> >> --- a/lib/dmadev/rte_dmadev.c
> >> +++ b/lib/dmadev/rte_dmadev.c
> >> @@ -218,6 +218,9 @@ rte_dma_pmd_release(const char *name)
> >>  	if (dev == NULL)
> >>  		return -EINVAL;
> >>  
> >> +	if (dev->state == RTE_DMA_DEV_READY)
> >> +		return rte_dma_close(dev->dev_id);
> > 
> > What is the logic here?
> > The only exposed function should be rte_dma_close()
> > and it should call the freeing function.
> > The API should use the dev_id. As you said somewhere else,
> > the name is only for debugging.
> > Please remove the function rte_dma_pmd_release(const char *name).
> 
> rte_dma_pmd_release corresponds to rte_dma_pmd_allocate, so both use the 'const char *name' parameter.
> 
> rte_dma_pmd_release is also used for error handling during PMD init, which is why a state
> variable is checked here. If the device is not ready, only its resources need to be released;
> otherwise, the close interface of the driver is invoked.
> 
> For a PMD, rte_dma_pmd_release is just a wrapper around dev_close when removing a device, so
> the driver does not need to implement two callbacks.
> 
> If we replace rte_dma_pmd_release with rte_dma_close, then we would have to invoke rte_dma_close
> in the error handling of PMD init, which leads to a conceptual inconsistency because the
> initialization has not been successful.
> 
> So I think it's better to keep rte_dma_pmd_release.

I will review again this logic in the next version.

> >> +/** DMA device supports memory-to-memory transfer.
> >> + *
> >> + * @see struct rte_dma_info::dev_capa
> >> + */
> >> +#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
> >> +/** DMA device supports memory-to-device transfer.
> >> + *
> >> + * @see struct rte_dma_info::dev_capa
> >> + */
> >> +#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
> > 
> > Same comment as in earlier version: please group the flags
> > in a doxygen group. Example of doxygen group:
> > https://patches.dpdk.org/project/dpdk/patch/20210830104232.598703-1-thomas@monjalon.net/
> 
> I tried, but found it didn't coexist well with multi-line comments.

What is not working? Example?
I think you didn't get what to do.
You must add a comment to give a title and group all these flags.
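
Something like this (an untested sketch):

	/**@{@name DMA capability flags
	 * Flags used in struct rte_dma_info::dev_capa.
	 */
	/** DMA device supports memory-to-memory transfer. */
	#define RTE_DMA_CAPA_MEM_TO_MEM	RTE_BIT64(0)
	/** DMA device supports memory-to-device transfer. */
	#define RTE_DMA_CAPA_MEM_TO_DEV	RTE_BIT64(1)
	/**@}*/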

> >> --- a/lib/dmadev/rte_dmadev_core.h
> >> +++ b/lib/dmadev/rte_dmadev_core.h
> >> +/** @internal Used to get device information of a device. */
> >> +typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
> >> +				  struct rte_dma_info *dev_info,
> >> +				  uint32_t info_sz);
> > 
> > Please move all driver interfaces in a file dedicated to drivers.
> 
> There are three header files: rte_dmadev.h, rte_dmadev_core.h and rte_dmadev_pmd.h.
> And we build the following dependency:
> 
>                 rte_dmadev.h   ---> rte_dmadev_core.h          // mainly because dataplane inline API.
>                     ^
>                     |
>            ---------------------
>            |                   |
>        Application       rte_dmadev_pmd.h
>                                ^
>                                |
>                              DMA PMD
> 
> 
> If we move all driver interfaces from rte_dmadev_core.h to rte_dmadev_pmd.h, a bidirectional
> dependency may arise, e.g.
> 
>                 rte_dmadev.h   ---> rte_dmadev_core.h  ---> rte_dmadev_pmd.h
>                     ^
>                     |
>            ---------------------
>            |                   |
>        Application       rte_dmadev_pmd.h
>                                ^
>                                |
>                              DMA PMD
> 
> So I think it's better to keep it that way.

Please make sure only what is needed for the inline functions is kept in "core.h".
You should look at the current effort done by Konstantin in ethdev to hide everything.




^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v24 0/6] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (26 preceding siblings ...)
  2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
@ 2021-10-09  9:33 ` Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 1/6] dmadev: introduce DMA device library Chengwen Feng
                     ` (5 more replies)
  2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
  29 siblings, 6 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-09  9:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch set contains six patches to add the new dmadev library.

Chengwen Feng (6):
  dmadev: introduce DMA device library
  dmadev: add control plane API support
  dmadev: add data plane API support
  dmadev: add multi-process support
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test

---
v24:
* use rte_dma_fp_object to hide implementation details.
* support group doxygen for RTE_DMA_CAPA_* and RTE_DMA_OP_*.
* adjusted the naming of some functions.
* fix typo.
v23:
* split multi-process support from 1st patch.
* fix some static check warning.
* fix skeleton cpu thread zero_req_count flip bug.
* add test_dmadev_api.h.
* add the description of modifying the dmadev state when init OK.
v22:
* function prefix change from rte_dmadev_* to rte_dma_*.
* change to prefix comment in most scenarios.
* dmadev dev_id use int16_t type.
* fix typo.
* organize patchsets in incremental mode.
v21:
* add comment for reserved fields of struct rte_dmadev.
v20:
* delete unnecessary and duplicate include header files.
* the conf_sz parameter is added to the configure and vchan-setup
  callbacks of the PMD, this is mainly used to enhance ABI
  compatibility.
* the rte_dmadev structure field is rearranged to reserve more space
  for I/O functions.
* fix some ambiguous and unnecessary comments.
* fix the potential memory leak of ut.
* redefine skeldma_init_once to skeldma_count.
* suppress rte_dmadev error output when execute ut.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   41 +
 app/test/test_dmadev_api.c             |  574 +++++++++++++
 app/test/test_dmadev_api.h             |    5 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/dmadevs/index.rst           |   12 +
 doc/guides/index.rst                   |    1 +
 doc/guides/prog_guide/dmadev.rst       |  120 +++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    6 +
 drivers/dma/meson.build                |    6 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  571 +++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   61 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  844 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1048 ++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |   78 ++
 lib/dmadev/rte_dmadev_pmd.h            |  173 ++++
 lib/dmadev/version.map                 |   35 +
 lib/meson.build                        |    1 +
 26 files changed, 3891 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 app/test/test_dmadev_api.h
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v24 1/6] dmadev: introduce DMA device library
  2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
@ 2021-10-09  9:33   ` Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 2/6] dmadev: add control plane API support Chengwen Feng
                     ` (4 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-09  9:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The 'dmadev' is a generic type of DMA device.

This patch introduces the 'dmadev' device allocation functions.

The infrastructure is prepared to welcome drivers in drivers/dma/

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   5 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/dmadevs/index.rst           |  12 ++
 doc/guides/index.rst                   |   1 +
 doc/guides/prog_guide/dmadev.rst       |  60 ++++++
 doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst        |   1 +
 doc/guides/rel_notes/release_21_11.rst |   4 +
 drivers/dma/meson.build                |   4 +
 drivers/meson.build                    |   1 +
 lib/dmadev/meson.build                 |   6 +
 lib/dmadev/rte_dmadev.c                | 246 +++++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 133 ++++++++++++
 lib/dmadev/rte_dmadev_pmd.h            |  90 ++++++++
 lib/dmadev/version.map                 |  20 ++
 lib/meson.build                        |   1 +
 17 files changed, 869 insertions(+)
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 278e5b3226..119cfaa04e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -454,6 +454,11 @@ F: app/test-regex/
 F: doc/guides/prog_guide/regexdev.rst
 F: doc/guides/regexdevs/features/default.ini
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 Eventdev API
 M: Jerin Jacob <jerinj@marvell.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107a03..2939050431 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -21,6 +21,7 @@ The public API headers are grouped by topics:
   [compressdev]        (@ref rte_compressdev.h),
   [compress]           (@ref rte_comp.h),
   [regexdev]           (@ref rte_regexdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [eventdev]           (@ref rte_eventdev.h),
   [event_eth_rx_adapter]   (@ref rte_event_eth_rx_adapter.h),
   [event_eth_tx_adapter]   (@ref rte_event_eth_tx_adapter.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a0195c6..109ec1f682 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/doc/guides/dmadevs/index.rst b/doc/guides/dmadevs/index.rst
new file mode 100644
index 0000000000..0bce29d766
--- /dev/null
+++ b/doc/guides/dmadevs/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Drivers
+==================
+
+The following is a list of DMA device drivers, which can be used from
+an application through the DMA API.
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 857f0363d3..919825992e 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -21,6 +21,7 @@ DPDK documentation
    compressdevs/index
    vdpadevs/index
    regexdevs/index
+   dmadevs/index
    eventdevs/index
    rawdevs/index
    mempool/index
diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000000..90bda28f33
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining a generic API which
+supports a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA framework provides a generic DMA device framework which supports both
+physical (hardware) and virtual (software) DMA devices, as well as a generic DMA
+API which allows DMA devices to be managed and configured, and supports DMA
+operations to be provisioned on a DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (aka. hardware
+   DMA queues), and each hardware DMA channel should be represented by a dmadev.
+ * The dmadev could create multiple virtual DMA channels, where each virtual DMA
+   channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. e.g. An application could create
+   virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+   virtual DMA channel 1 for the memory-to-device transfer scenario.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their
+PCI BDF (bus/bridge, device, function). Physical DMA controllers, like
+other physical devices in DPDK, can be listed using the EAL command line options.
+
+The dmadevs are dynamically allocated by using the function
+``rte_dma_pmd_allocate`` based on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000000..157d7eb7dc
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507f46..89af28dacb 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -27,6 +27,7 @@ Programmer's Guide
     cryptodev_lib
     compressdev
     regexdev
+    dmadev
     rte_security
     rawdev
     link_bonding_poll_mode_drv_lib
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 89d4b33ef1..929c0d6113 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -141,6 +141,10 @@ New Features
   * Added tests to validate packets hard expiry.
   * Added tests to verify tunnel header verification in IPsec inbound.
 
+* **Introduced dmadev library with:**
+
+  * Device allocation functions.
+
 
 Removed Items
 -------------
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000000..a24c56d8ff
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2021 HiSilicon Limited
+
+drivers = []
diff --git a/drivers/meson.build b/drivers/meson.build
index 3d08540581..b7d680868a 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000000..f8d54c6e74
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000000..42a4693bd9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+static int16_t dma_devices_max;
+
+struct rte_dma_dev *rte_dma_devices;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
+#define RTE_DMA_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
+		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))
+
+int
+rte_dma_dev_max(size_t dev_max)
+{
+	/* This function may be called before rte_eal_init(), so no rte library
+	 * function can be called in this function.
+	 */
+	if (dev_max == 0 || dev_max > INT16_MAX)
+		return -EINVAL;
+
+	if (dma_devices_max > 0)
+		return -EINVAL;
+
+	dma_devices_max = dev_max;
+
+	return 0;
+}
+
+static int
+dma_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMA_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMA_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
+		RTE_DMA_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int16_t
+dma_find_free_id(void)
+{
+	int16_t i;
+
+	if (rte_dma_devices == NULL)
+		return -1;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
+			return i;
+	}
+
+	return -1;
+}
+
+static struct rte_dma_dev*
+dma_find_by_name(const char *name)
+{
+	int16_t i;
+
+	if (rte_dma_devices == NULL)
+		return NULL;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
+		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+			return &rte_dma_devices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dma_dev_data_prepare(void)
+{
+	size_t size;
+
+	if (rte_dma_devices != NULL)
+		return 0;
+
+	size = dma_devices_max * sizeof(struct rte_dma_dev);
+	rte_dma_devices = malloc(size);
+	if (rte_dma_devices == NULL)
+		return -ENOMEM;
+	memset(rte_dma_devices, 0, size);
+
+	return 0;
+}
+
+static int
+dma_data_prepare(void)
+{
+	if (dma_devices_max == 0)
+		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+	return dma_dev_data_prepare();
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+	void *dev_private;
+	int16_t dev_id;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	dev = dma_find_by_name(name);
+	if (dev != NULL) {
+		RTE_DMA_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	dev_private = rte_zmalloc_socket(name, private_data_size,
+					 RTE_CACHE_LINE_SIZE, numa_node);
+	if (dev_private == NULL) {
+		RTE_DMA_LOG(ERR, "Cannot allocate private data");
+		return NULL;
+	}
+
+	dev_id = dma_find_free_id();
+	if (dev_id < 0) {
+		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
+		rte_free(dev_private);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[dev_id];
+	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
+	dev->dev_id = dev_id;
+	dev->numa_node = numa_node;
+	dev->dev_private = dev_private;
+
+	return dev;
+}
+
+static void
+dma_release(struct rte_dma_dev *dev)
+{
+	rte_free(dev->dev_private);
+	memset(dev, 0, sizeof(struct rte_dma_dev));
+}
+
+struct rte_dma_dev *
+rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0 || private_data_size == 0)
+		return NULL;
+
+	dev = dma_allocate(name, numa_node, private_data_size);
+	if (dev == NULL)
+		return NULL;
+
+	dev->state = RTE_DMA_DEV_REGISTERED;
+
+	return dev;
+}
+
+int
+rte_dma_pmd_release(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find_by_name(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	dma_release(dev);
+	return 0;
+}
+
+int
+rte_dma_get_dev_id_by_name(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find_by_name(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	return dev->dev_id;
+}
+
+bool
+rte_dma_is_valid(int16_t dev_id)
+{
+	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
+		rte_dma_devices != NULL &&
+		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
+}
+
+uint16_t
+rte_dma_count_avail(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	if (rte_dma_devices == NULL)
+		return count;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
+			count++;
+	}
+
+	return count;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000000..87810f2f08
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021 Marvell International Ltd
+ * Copyright(c) 2021 SmartShare Systems
+ */
+
+#ifndef RTE_DMADEV_H
+#define RTE_DMADEV_H
+
+/**
+ * @file rte_dmadev.h
+ *
+ * DMA (Direct Memory Access) device API.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW DMA channel |               | HW DMA channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW DMA Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka. HW-DMA-queues),
+ * and each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels, where each virtual DMA
+ * channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. e.g. An application could create
+ * virtual DMA channel 0 for the memory-to-memory transfer scenario, and
+ * virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ */
+
+#include <stdint.h>
+
+#include <rte_bitops.h>
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum number of devices if rte_dma_dev_max() is not called. */
+#define RTE_DMADEV_DEFAULT_MAX 64
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure the maximum number of dmadevs.
+ * @note This function can be invoked before the primary process rte_eal_init()
+ * to change the maximum number of dmadevs. If not invoked, the maximum number
+ * of dmadevs defaults to RTE_DMADEV_DEFAULT_MAX.
+ *
+ * @param dev_max
+ *   maximum number of dmadevs.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dev_max(size_t dev_max);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int rte_dma_get_dev_id_by_name(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the dev_id is valid.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool rte_dma_is_valid(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t rte_dma_count_avail(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_H */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000000..bb09382dce
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef RTE_DMADEV_PMD_H
+#define RTE_DMADEV_PMD_H
+
+/**
+ * @file
+ *
+ * DMA Device PMD interface
+ *
+ * Driver facing interface for a DMA device. These are not to be called directly
+ * by any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Possible states of a DMA device.
+ *
+ * @see struct rte_dma_dev::state
+ */
+enum rte_dma_dev_state {
+	RTE_DMA_DEV_UNUSED = 0, /**< Device is unused. */
+	/** Device is registered, but not ready to be used. */
+	RTE_DMA_DEV_REGISTERED,
+	/** Device is ready for use. This is set by the PMD. */
+	RTE_DMA_DEV_READY,
+};
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ */
+struct rte_dma_dev {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	void *dev_private; /**< PMD-specific private data. */
+	/** Device info supplied during device initialization. */
+	struct rte_device *device;
+	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dma_dev *rte_dma_devices;
+
+/**
+ * @internal
+ * Allocate a new dmadev slot for a DMA device and return the pointer to that
+ * slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ * @param numa_node
+ *   The NUMA node of the driver's private data.
+ * @param private_data_size
+ *   Driver's private data size.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dma_dev *rte_dma_pmd_allocate(const char *name, int numa_node,
+					 size_t private_data_size);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   - 0 on success, negative on error.
+ */
+__rte_internal
+int rte_dma_pmd_release(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_PMD_H */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000000..f8a0076468
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dma_count_avail;
+	rte_dma_dev_max;
+	rte_dma_get_dev_id_by_name;
+	rte_dma_is_valid;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dma_devices;
+	rte_dma_pmd_allocate;
+	rte_dma_pmd_release;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 9c4841fe40..0bf1f51357 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -45,6 +45,7 @@ libraries = [
         'pdump',
         'rawdev',
         'regexdev',
+        'dmadev',
         'rib',
         'reorder',
         'sched',
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v24 2/6] dmadev: add control plane API support
  2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 1/6] dmadev: introduce DMA device library Chengwen Feng
@ 2021-10-09  9:33   ` Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 3/6] dmadev: add data " Chengwen Feng
                     ` (3 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-09  9:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch add control plane API for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  38 ++
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 360 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 464 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_pmd.h            |  61 ++++
 lib/dmadev/version.map                 |   9 +
 6 files changed, 933 insertions(+)

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index 90bda28f33..5c70ad3d6a 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -58,3 +58,41 @@ identifiers:
 
 - A device name used to designate the DMA device in console messages, for
   administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The rte_dma_configure API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dma_configure(int16_t dev_id,
+                         const struct rte_dma_conf *dev_conf);
+
+The ``rte_dma_conf`` structure is used to pass the configuration parameters
+for the DMA device.
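+
+A minimal sketch of this call (assuming a valid ``dev_id`` obtained during
+device discovery) could look as follows:
+
+.. code-block:: c
+
+   struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
+
+   if (rte_dma_configure(dev_id, &dev_conf) < 0)
+       rte_exit(EXIT_FAILURE, "Error configuring dmadev %d\n", dev_id);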
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rte_dma_vchan_setup API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+                           const struct rte_dma_vchan_conf *conf);
+
+The ``rte_dma_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel.
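+
+For example, a memory-to-memory channel could be set up as below; the
+``nb_desc`` value here is only illustrative and must lie within the
+``min_desc``/``max_desc`` range reported by ``rte_dma_info_get``:
+
+.. code-block:: c
+
+   struct rte_dma_vchan_conf vchan_conf = {
+       .direction = RTE_DMA_DIR_MEM_TO_MEM,
+       .nb_desc = 1024,
+   };
+
+   if (rte_dma_vchan_setup(dev_id, 0, &vchan_conf) < 0)
+       rte_exit(EXIT_FAILURE, "Error setting up vchan 0\n");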
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dma_info_get`` API
+can be used to get the device info and supported features.
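+
+For example, an application could check for scatter-gather copy support with
+a sketch like the following:
+
+.. code-block:: c
+
+   struct rte_dma_info info;
+
+   if (rte_dma_info_get(dev_id, &info) == 0 &&
+           (info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG))
+       printf("dmadev %d supports scatter-gather copy\n", dev_id);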
+
+Silent mode is a special device capability which means the application is not
+required to invoke dequeue APIs.
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 929c0d6113..f935a3f395 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -144,6 +144,7 @@ New Features
 * **Introduced dmadev library with:**
 
   * Device allocation functions.
+  * Control plane API.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 42a4693bd9..a6a5680d2b 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -201,6 +201,9 @@ rte_dma_pmd_release(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
+	if (dev->state == RTE_DMA_DEV_READY)
+		return rte_dma_close(dev->dev_id);
+
 	dma_release(dev);
 	return 0;
 }
@@ -244,3 +247,360 @@ rte_dma_count_avail(void)
 
 	return count;
 }
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dma_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dma_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->numa_node = dev->device->numa_node;
+	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > dev_info.max_vchans) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configure too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+		RTE_DMA_LOG(ERR, "Device %d don't support silent", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dma_conf));
+	if (ret == 0)
+		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+
+	return ret;
+}
+
+int
+rte_dma_start(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (dev->dev_started == 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	/* Device must be stopped before it can be closed */
+	if (dev->dev_started == 1) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_close)(dev);
+	if (ret == 0)
+		dma_release(dev);
+
+	return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= dev_info.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %d vchan out range!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d don't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < dev_info.min_desc ||
+	    conf->nb_desc > dev_info.max_desc) {
+		RTE_DMA_LOG(ERR,
+			"Device %d number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dma_vchan_conf));
+}
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	if (!rte_dma_is_valid(dev_id) || stats == NULL)
+		return -EINVAL;
+
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dma_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMA_CAPA_SVA,         "sva"     },
+		{ RTE_DMA_CAPA_SILENT,      "silent"  },
+		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		(void)fprintf(f, " %s", dma_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	(void)fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || f == NULL)
+		return -EINVAL;
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+
+	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+		dev->dev_id,
+		dev->dev_name,
+		dev->dev_started ? "started" : "stopped");
+	dma_dump_capability(f, dev_info.dev_capa);
+	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
+	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
+	(void)fprintf(f, "  silent_mode: %s\n",
+		dev->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 87810f2f08..34a4c26851 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -48,6 +48,29 @@
  * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
  * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
  *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dma_configure()
+ *     - rte_dma_vchan_setup()
+ *     - rte_dma_start()
+ *
+ * Then, the application can invoke dataplane functions to process jobs.
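+ *
+ * A minimal sketch of this sequence (error handling omitted; the nb_desc
+ * value is illustrative and must respect the device's reported
+ * min_desc/max_desc limits):
+ *
+ * @code
+ *     struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
+ *     struct rte_dma_vchan_conf vchan_conf = {
+ *         .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *         .nb_desc = 1024,
+ *     };
+ *
+ *     rte_dma_configure(dev_id, &dev_conf);
+ *     rte_dma_vchan_setup(dev_id, 0, &vchan_conf);
+ *     rte_dma_start(dev_id);
+ * @endcode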
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
+ * rte_dma_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dma_start() again. The dataplane functions should not
+ * be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the rte_dma_close()
+ * function.
+ *
+ * About MT-safe, all the functions of the dmadev API implemented by a PMD are
+ * lock-free functions which assume to not be invoked in parallel on different
+ * logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
  */
 
 #include <stdint.h>
@@ -126,6 +149,447 @@ bool rte_dma_is_valid(int16_t dev_id);
 __rte_experimental
 uint16_t rte_dma_count_avail(void);
 
+/**@{@name DMA capability
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
+/**< Support memory-to-memory transfer */
+#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
+/**< Support memory-to-device transfer. */
+#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
+/**< Support device-to-memory transfer. */
+#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
+/**< Support device-to-device transfer. */
+#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
+/**< Support SVA which could use VA as DMA address.
+ * If the device supports SVA, then the application could pass any VA address
+ * like memory from rte_malloc(), rte_memzone(), malloc or stack memory.
+ * If the device doesn't support SVA, then the application should pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ */
+#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
+/**< Support work in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dma_completed*() APIs.
+ * @see struct rte_dma_conf::enable_silent
+ */
+#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
+/**< Support copy operation.
+ * This capability starts at bit index 32, so as to leave a gap between the
+ * normal capability bits and the operation capability bits.
+ */
+#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
+/**< Support scatter-gather list copy operation. */
+#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
+/**< Support fill operation. */
+/**@}*/
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ *
+ * @see rte_dma_info_get
+ */
+struct rte_dma_info {
+	/** Device capabilities (RTE_DMA_CAPA_*). */
+	uint64_t dev_capa;
+	/** Maximum number of virtual DMA channels supported. */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/** Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dma_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t max_sges;
+	/** NUMA node connection, -1 if unknown. */
+	int16_t numa_node;
+	/** Number of virtual DMA channels configured. */
+	uint16_t nb_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dma_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ *
+ * @see rte_dma_configure
+ */
+struct rte_dma_conf {
+	/** The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dma_info obtained from rte_dma_info_get().
+	 */
+	uint16_t nb_vchans;
+	/** Indicates whether to enable silent mode.
+	 * false: normal mode, true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMA_CAPA_SILENT
+	 */
+	bool enable_silent;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dma_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_start(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dma_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stop(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_close(int16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+enum rte_dma_direction {
+	/** DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/** DMA transfer direction - from memory to device.
+	 * In a typical scenario, an SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and could initiate a DMA move request from
+	 * memory (which is SoC memory) to device (which is host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/** DMA transfer direction - from device to memory.
+	 * In a typical scenario, an SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and could initiate a DMA move request from
+	 * device (which is host memory) to memory (which is SoC memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/** DMA transfer direction - from device to device.
+	 * In a typical scenario, an SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and could initiate a DMA move request from
+	 * device (which is host memory) to another device (which is another
+	 * host's memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+};
+
+/**
+ * DMA access port type defines.
+ *
+ * @see struct rte_dma_port_param::port_type
+ */
+enum rte_dma_port_type {
+	RTE_DMA_PORT_NONE,
+	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dma_vchan_conf::src_port
+ * @see struct rte_dma_vchan_conf::dst_port
+ */
+struct rte_dma_port_param {
+	/** The device access port type.
+	 *
+	 * @see enum rte_dma_port_type
+	 */
+	enum rte_dma_port_type port_type;
+	RTE_STD_C11
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows how the SoC's PCIe module connects
+		 * to multiple PCIe hosts and multiple endpoints. The PCIe
+		 * module has an integrated DMA controller.
+		 *
+		 * If the DMA needs to access the memory of host A, the request
+		 * can be initiated by PF-1 in core0, or by VF-0 of PF-0 in
+		 * core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check the driver-specific documentation for
+		 * limitations and capabilities.
+		 */
+		__extension__
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			/** The pasid field in TLP packet. */
+			uint64_t pasid : 20;
+			/** The attributes field in TLP packet. */
+			uint64_t attr : 3;
+			/** The processing hint field in TLP packet. */
+			uint64_t ph : 2;
+			/** The steering tag field in TLP packet. */
+			uint64_t st : 16;
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ *
+ * @see rte_dma_vchan_setup
+ */
+struct rte_dma_vchan_conf {
+	/** Transfer direction
+	 *
+	 * @see enum rte_dma_direction
+	 */
+	enum rte_dma_direction direction;
+	/** Number of descriptors for the virtual DMA channel. */
+	uint16_t nb_desc;
+	/** 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param src_port;
+	/** 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param dst_port;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dma_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+			const struct rte_dma_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ *
+ * @see rte_dma_stats_get
+ */
+struct rte_dma_stats {
+	/** Count of operations which were submitted to hardware. */
+	uint64_t submitted;
+	/** Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t completed;
+	/** Count of operations which failed to complete. */
+	uint64_t errors;
+};
+
+/**
+ * Special ID, which is used to represent all virtual DMA channels.
+ *
+ * @see rte_dma_stats_get
+ * @see rte_dma_stats_reset
+ */
+#define RTE_DMA_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMA_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dma_stats
+ *   object.
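+ *
+ * For example (a sketch; assumes the device has been configured):
+ *
+ * @code
+ *     struct rte_dma_stats stats;
+ *     // Aggregate statistics across all virtual DMA channels.
+ *     if (rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &stats) == 0)
+ *         printf("completed: %" PRIu64 "\n", stats.completed);
+ * @endcode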
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
+		      struct rte_dma_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMA_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dump(int16_t dev_id, FILE *f);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index bb09382dce..5fcf0f60b8 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -20,6 +20,62 @@
 extern "C" {
 #endif
 
+struct rte_dma_dev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
+				  struct rte_dma_info *dev_info,
+				  uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev,
+				   const struct rte_dma_conf *dev_conf,
+				   uint32_t conf_sz);
+
+/** @internal Used to start a configured device. */
+typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				const struct rte_dma_vchan_conf *conf,
+				uint32_t conf_sz);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev,
+			uint16_t vchan, struct rte_dma_stats *stats,
+			uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
+
+/**
+ * DMA device operations function pointer table.
+ *
+ * @see struct rte_dma_dev::dev_ops
+ */
+struct rte_dma_dev_ops {
+	rte_dma_info_get_t         dev_info_get;
+	rte_dma_configure_t        dev_configure;
+	rte_dma_start_t            dev_start;
+	rte_dma_stop_t             dev_stop;
+	rte_dma_close_t            dev_close;
+
+	rte_dma_vchan_setup_t      vchan_setup;
+
+	rte_dma_stats_get_t        stats_get;
+	rte_dma_stats_reset_t      stats_reset;
+
+	rte_dma_dump_t             dev_dump;
+};
 /**
  * Possible states of a DMA device.
  *
@@ -44,7 +100,12 @@ struct rte_dma_dev {
 	void *dev_private; /**< PMD-specific private data. */
 	/** Device info supplied during device initialization. */
 	struct rte_device *device;
+	/** Functions implemented by PMD. */
+	const struct rte_dma_dev_ops *dev_ops;
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	__extension__
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index f8a0076468..e925dfcd6d 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -1,10 +1,19 @@
 EXPERIMENTAL {
 	global:
 
+	rte_dma_close;
+	rte_dma_configure;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
+	rte_dma_dump;
 	rte_dma_get_dev_id_by_name;
+	rte_dma_info_get;
 	rte_dma_is_valid;
+	rte_dma_start;
+	rte_dma_stats_get;
+	rte_dma_stats_reset;
+	rte_dma_stop;
+	rte_dma_vchan_setup;
 
 	local: *;
 };
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v24 3/6] dmadev: add data plane API support
  2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 1/6] dmadev: introduce DMA device library Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 2/6] dmadev: add control plane API support Chengwen Feng
@ 2021-10-09  9:33   ` Chengwen Feng
  2021-10-09 10:03     ` fengchengwen
  2021-10-11 10:40     ` Bruce Richardson
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 4/6] dmadev: add multi-process support Chengwen Feng
                     ` (2 subsequent siblings)
  5 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-09  9:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch add data plane API for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  22 ++
 doc/guides/rel_notes/release_21_11.rst |   2 +-
 lib/dmadev/meson.build                 |   1 +
 lib/dmadev/rte_dmadev.c                | 134 ++++++++
 lib/dmadev/rte_dmadev.h                | 451 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  78 +++++
 lib/dmadev/rte_dmadev_pmd.h            |   7 +
 lib/dmadev/version.map                 |   6 +
 8 files changed, 700 insertions(+), 1 deletion(-)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index 5c70ad3d6a..2e2a4bb62a 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -96,3 +96,25 @@ can be used to get the device info and supported features.
 
 Silent mode is a special device capability which does not require the
 application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dma_copy`` and ``rte_dma_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dma_submit`` API is used to issue the doorbell to hardware.
+Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs, ``rte_dma_completed`` and
+``rte_dma_completed_status``, which are used to obtain the results of the
+enqueue requests. ``rte_dma_completed`` will return the number of successfully
+completed operations. ``rte_dma_completed_status`` will return the number of
+completed operations along with the status of each operation (filled into the
+``status`` array passed by the user). These two APIs can also return the last
+completed operation's ``ring_idx``, which can help the user track operations
+within their own application-defined ring.
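+
+Putting these together, the following is a sketch of a copy-and-poll loop,
+assuming a mem-to-mem vchan 0 and ``srcs``/``dsts`` arrays of valid IOVA
+addresses (error handling kept minimal):
+
+.. code-block:: c
+
+   uint16_t last_idx, submitted = 0, completed = 0;
+   bool has_error = false;
+   uint16_t i;
+
+   for (i = 0; i < nb_jobs; i++) {
+       if (rte_dma_copy(dev_id, 0, srcs[i], dsts[i], len, 0) < 0)
+           break; /* no space left to enqueue */
+       submitted++;
+   }
+   rte_dma_submit(dev_id, 0); /* one doorbell for the whole batch */
+
+   while (completed < submitted && !has_error)
+       completed += rte_dma_completed(dev_id, 0, submitted - completed,
+                                      &last_idx, &has_error);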
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index f935a3f395..d1d7abf694 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -144,7 +144,7 @@ New Features
 * **Introduced dmadev library with:**
 
   * Device allocation functions.
-  * Control plane API.
+  * Control and data plane API.
 
 
 Removed Items
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f8d54c6e74..d2fc85e8c7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,4 +3,5 @@
 
 sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index a6a5680d2b..891ceeb988 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -17,6 +17,7 @@
 
 static int16_t dma_devices_max;
 
+struct rte_dma_fp_object *rte_dma_fp_objs;
 struct rte_dma_dev *rte_dma_devices;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
@@ -97,6 +98,38 @@ dma_find_by_name(const char *name)
 	return NULL;
 }
 
+static void dma_fp_object_reset(int16_t dev_id);
+
+static int
+dma_fp_data_prepare(void)
+{
+	size_t size;
+	void *ptr;
+	int i;
+
+	if (rte_dma_fp_objs != NULL)
+		return 0;
+
+	/* Fast-path objects must be cache-line aligned, but the pointer
+	 * returned by malloc may not be. Therefore, extra memory is allocated
+	 * so the array can be realigned to a cache-line boundary.
+	 * Note: posix_memalign/aligned_alloc are not called because their
+	 * availability depends on the libc version.
+	 */
+	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
+		RTE_CACHE_LINE_SIZE;
+	ptr = malloc(size);
+	if (ptr == NULL)
+		return -ENOMEM;
+	memset(ptr, 0, size);
+
+	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
+	for (i = 0; i < dma_devices_max; i++)
+		dma_fp_object_reset(i);
+
+	return 0;
+}
+
 static int
 dma_dev_data_prepare(void)
 {
@@ -117,8 +150,15 @@ dma_dev_data_prepare(void)
 static int
 dma_data_prepare(void)
 {
+	int ret;
+
 	if (dma_devices_max == 0)
 		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+
+	ret = dma_fp_data_prepare();
+	if (ret)
+		return ret;
+
 	return dma_dev_data_prepare();
 }
 
@@ -317,6 +357,8 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	return ret;
 }
 
+static void dma_fp_object_setup(int16_t dev_id,	const struct rte_dma_dev *dev);
+
 int
 rte_dma_start(int16_t dev_id)
 {
@@ -344,6 +386,7 @@ rte_dma_start(int16_t dev_id)
 		return ret;
 
 mark_started:
+	dma_fp_object_setup(dev_id, dev);
 	dev->dev_started = 1;
 	return 0;
 }
@@ -370,6 +413,7 @@ rte_dma_stop(int16_t dev_id)
 		return ret;
 
 mark_stopped:
+	dma_fp_object_reset(dev_id);
 	dev->dev_started = 0;
 	return 0;
 }
@@ -604,3 +648,93 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 
 	return 0;
 }
+
+static int
+dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
+	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	      __rte_unused const struct rte_dma_sge *src,
+	      __rte_unused const struct rte_dma_sge *dst,
+	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
+	      __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
+	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
+{
+	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
+	return -EINVAL;
+}
+
+static uint16_t
+dummy_completed(__rte_unused void *dev_private,	__rte_unused uint16_t vchan,
+		__rte_unused const uint16_t nb_cpls,
+		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
+{
+	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
+	return 0;
+}
+
+static uint16_t
+dummy_completed_status(__rte_unused void *dev_private,
+		       __rte_unused uint16_t vchan,
+		       __rte_unused const uint16_t nb_cpls,
+		       __rte_unused uint16_t *last_idx,
+		       __rte_unused enum rte_dma_status_code *status)
+{
+	RTE_DMA_LOG(ERR,
+		    "completed_status is not configured or not supported.");
+	return 0;
+}
+
+static void
+dma_fp_object_reset(int16_t dev_id)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+	obj->copy             = dummy_copy;
+	obj->copy_sg          = dummy_copy_sg;
+	obj->fill             = dummy_fill;
+	obj->submit           = dummy_submit;
+	obj->completed        = dummy_completed;
+	obj->completed_status = dummy_completed_status;
+}
+
+static void
+dma_fp_object_setup(int16_t dev_id, const struct rte_dma_dev *dev)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+	obj->dev_private = dev->dev_private;
+	if (dev->dev_ops->copy)
+		obj->copy = dev->dev_ops->copy;
+	if (dev->dev_ops->copy_sg)
+		obj->copy_sg = dev->dev_ops->copy_sg;
+	if (dev->dev_ops->fill)
+		obj->fill = dev->dev_ops->fill;
+	if (dev->dev_ops->submit)
+		obj->submit = dev->dev_ops->submit;
+	if (dev->dev_ops->completed)
+		obj->completed = dev->dev_ops->completed;
+	if (dev->dev_ops->completed_status)
+		obj->completed_status = dev->dev_ops->completed_status;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 34a4c26851..95b6a0a810 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -65,6 +65,77 @@
  * Finally, an application can close a dmadev by invoking the rte_dma_close()
  * function.
  *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dma_copy()
+ *     - rte_dma_copy_sg()
+ *     - rte_dma_fill()
+ *     - rte_dma_submit()
+ *
+ * These APIs could work with different virtual DMA channels which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel: if the submission is successful, a non-negative
+ * ring_idx <= UINT16_MAX is returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue the doorbell to hardware. Alternatively, the
+ * flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can
+ * do the same work.
+ * @note When enqueuing a set of jobs to the device, having a separate submit
+ * outside a loop makes for clearer code than having a check for the last
+ * iteration inside the loop to set a special submit flag.  However, for cases
+ * where one item alone is to be submitted or there is a small set of jobs to
+ * be submitted sequentially, having a submit flag provides a lower-overhead
+ * way of doing the submission while still keeping the code clean.
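+ *
+ * For example (a sketch; assumes the enqueues succeed):
+ *
+ * @code
+ *     for (i = 0; i < nb_jobs; i++)
+ *         rte_dma_copy(dev_id, vchan, src[i], dst[i], len, 0);
+ *     rte_dma_submit(dev_id, vchan);
+ *
+ *     // versus a single job submitted together with the doorbell:
+ *     rte_dma_copy(dev_id, vchan, src0, dst0, len, RTE_DMA_OP_FLAG_SUBMIT);
+ * @endcode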
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dma_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dma_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT), the
+ * application does not need to invoke the above two completion APIs.
+ *
+ * About the ring_idx which enqueue APIs (e.g. rte_dma_copy(), rte_dma_fill())
+ * return, the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx return is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx return is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dma_copy(),
+ * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type.
+ *
+ * The dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory address
+ * can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
  * About MT-safe, all the functions of the dmadev API implemented by a PMD are
  * lock-free functions which assume to not be invoked in parallel on different
  * logical cores to work on the same target dmadev object.
@@ -590,6 +661,386 @@ int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 int rte_dma_dump(int16_t dev_id, FILE *f);
 
+/**
+ * DMA transfer result status code defines.
+ *
+ * @see rte_dma_completed_status
+ */
+enum rte_dma_status_code {
+	/** The operation completed successfully. */
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/** The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user could modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job) so that outstanding requests complete as soon as possible,
+	 * reducing the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_USER_ABORT,
+	/** The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it's possible for jobs from later batches to be
+	 * completed, though, so report the status of the not-attempted jobs
+	 * before reporting those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/** The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/** The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/** The operation failed to complete due to an invalid source or
+	 * destination address. This covers the case where only an address
+	 * error is known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/** The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/** The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor could have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/** The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/** The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/** The operation failed to complete due to a bus error. This covers
+	 * the case where only a bus error is known, but not which direction
+	 * is in error.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/** The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/** The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/** The operation failed to complete due to a device link error.
+	 * Used to indicate a link error in the memory-to-device/
+	 * device-to-memory/device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/** The operation failed to complete due to a page fault during
+	 * address lookup.
+	 */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/** The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+};
+
+/**
+ * A structure used to hold scatter-gather DMA operation request entry.
+ *
+ * @see rte_dma_copy_sg
+ */
+struct rte_dma_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+#include "rte_dmadev_core.h"
+
+/**@{@name DMA operation flag
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+#define RTE_DMA_OP_FLAG_FENCE	RTE_BIT64(0)
+/**< Fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in order (i.e. it has a default fence between
+ * operations), this flag could be a NOP.
+ */
+#define RTE_DMA_OP_FLAG_SUBMIT	RTE_BIT64(1)
+/**< Submit flag.
+ * It means the doorbell must be issued to hardware after this operation is
+ * enqueued.
+ */
+#define RTE_DMA_OP_FLAG_LLC	RTE_BIT64(2)
+/**< Hint to write data to low-level cache.
+ * Used for performance optimization. This is just a hint, and there is no
+ * capability bit for it; a driver should not return an error if this flag is
+ * set.
+ */
+/**@}*/
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware, if the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
+ * this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy, -ENOTSUP);
+#endif
+
+	return (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware, if the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT then
+ * trigger doorbell to begin this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer to the source scatter-gather entry array.
+ * @param dst
+ *   The pointer to the destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
+		struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
+	    nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy_sg, -ENOTSUP);
+#endif
+
+	return (*obj->copy_sg)(obj->dev_private, vchan, src, dst, nb_src,
+			       nb_dst, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware, if the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT then trigger doorbell to begin
+ * this operation, otherwise do not trigger doorbell.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
+	     rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->fill, -ENOTSUP);
+#endif
+
+	return (*obj->fill)(obj->dev_private, vchan, pattern, dst, length,
+			    flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dma_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+static inline int
+rte_dma_submit(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->submit, -ENOTSUP);
+#endif
+
+	return (*obj->submit)(obj->dev_private, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if there was a transfer error.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx,
+				 has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return the number of operations that have been completed; each completed
+ * operation's result may be either success or failure.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number n is greater than zero, then the first n entries of the
+ *   status array are also set.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
+			 const uint16_t nb_cpls, uint16_t *last_idx,
+			 enum rte_dma_status_code *status)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*obj->completed_status)(obj->dev_private, vchan, nb_cpls,
+					last_idx, status);
+}
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000000..be08faa202
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef RTE_DMADEV_CORE_H
+#define RTE_DMADEV_CORE_H
+
+/**
+ * @file
+ *
+ * DMA Device internal header.
+ *
+ * This header contains internal data types which are used by the dataplane
+ * inline functions.
+ *
+ * Applications should not use these functions directly.
+ */
+
+/** @internal Used to enqueue a copy operation. */
+typedef int (*rte_dma_copy_t)(void *dev_private, uint16_t vchan,
+			      rte_iova_t src, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to enqueue a scatter-gather list copy operation. */
+typedef int (*rte_dma_copy_sg_t)(void *dev_private, uint16_t vchan,
+				 const struct rte_dma_sge *src,
+				 const struct rte_dma_sge *dst,
+				 uint16_t nb_src, uint16_t nb_dst,
+				 uint64_t flags);
+
+/** @internal Used to enqueue a fill operation. */
+typedef int (*rte_dma_fill_t)(void *dev_private, uint16_t vchan,
+			      uint64_t pattern, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to trigger hardware to begin working. */
+typedef int (*rte_dma_submit_t)(void *dev_private, uint16_t vchan);
+
+/** @internal Used to return the number of successfully completed operations. */
+typedef uint16_t (*rte_dma_completed_t)(void *dev_private,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+
+/** @internal Used to return the number of completed operations. */
+typedef uint16_t (*rte_dma_completed_status_t)(void *dev_private,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+
+/**
+ * @internal
+ * Fast-path dmadev functions and related data are held in a flat array,
+ * one entry per dmadev.
+ *
+ * On 64-bit systems the contents of this structure occupy exactly two 64B
+ * cache lines. On 32-bit systems the contents fit into one 64B cache line.
+ *
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance, because the PMD mainly depends on this field.
+ */
+struct rte_dma_fp_object {
+	void *dev_private; /**< PMD-specific private data. */
+	rte_dma_copy_t             copy;
+	rte_dma_copy_sg_t          copy_sg;
+	rte_dma_fill_t             fill;
+	rte_dma_submit_t           submit;
+	rte_dma_completed_t        completed;
+	rte_dma_completed_status_t completed_status;
+	void *reserved_cl0;
+	/** Reserve space for future IO functions, while keeping data and
+	 * dev_ops pointers on the second cacheline.
+	 */
+	void *reserved_cl1[6];
+} __rte_cache_aligned;
+
+extern struct rte_dma_fp_object *rte_dma_fp_objs;
+
+#endif /* RTE_DMADEV_CORE_H */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index 5fcf0f60b8..07056b45e7 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -75,6 +75,13 @@ struct rte_dma_dev_ops {
 	rte_dma_stats_reset_t      stats_reset;
 
 	rte_dma_dump_t             dev_dump;
+
+	rte_dma_copy_t             copy;
+	rte_dma_copy_sg_t          copy_sg;
+	rte_dma_fill_t             fill;
+	rte_dma_submit_t           submit;
+	rte_dma_completed_t        completed;
+	rte_dma_completed_status_t completed_status;
 };
 /**
  * Possible states of a DMA device.
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index e925dfcd6d..4d40104689 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -2,10 +2,15 @@ EXPERIMENTAL {
 	global:
 
 	rte_dma_close;
+	rte_dma_completed;
+	rte_dma_completed_status;
 	rte_dma_configure;
+	rte_dma_copy;
+	rte_dma_copy_sg;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
 	rte_dma_dump;
+	rte_dma_fill;
 	rte_dma_get_dev_id_by_name;
 	rte_dma_info_get;
 	rte_dma_is_valid;
@@ -13,6 +18,7 @@ EXPERIMENTAL {
 	rte_dma_stats_get;
 	rte_dma_stats_reset;
 	rte_dma_stop;
+	rte_dma_submit;
 	rte_dma_vchan_setup;
 
 	local: *;
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread
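
A minimal usage sketch of the enqueue/submit/completed fast path described
above, assuming a dmadev that is already configured, with vchan 0 set up and
the device started; the function name and IOVA arguments are illustrative:

#include <errno.h>
#include <rte_dmadev.h>

/* Enqueue two copies on vchan 0, ringing the doorbell only once via the
 * RTE_DMA_OP_FLAG_SUBMIT flag on the second enqueue (so no separate
 * rte_dma_submit() call is needed), then poll until both complete.
 * A busy-wait is used for brevity.
 */
static int
copy_two_buffers(int16_t dev_id, rte_iova_t src, rte_iova_t dst, uint32_t len)
{
	uint16_t last_idx, done = 0;
	bool has_error;
	int ret;

	ret = rte_dma_copy(dev_id, 0, src, dst, len, 0);
	if (ret < 0)
		return ret;
	ret = rte_dma_copy(dev_id, 0, src + len, dst + len, len,
			   RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0)
		return ret;

	while (done < 2) {
		done += rte_dma_completed(dev_id, 0, 2 - done, &last_idx,
					  &has_error);
		if (has_error)
			return -EIO;
	}
	return 0;
}

Where a per-operation status code is needed rather than a single error flag,
rte_dma_completed_status() can be polled instead, at the cost of the slower
status-filling path.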

* [dpdk-dev] [PATCH v24 4/6] dmadev: add multi-process support
  2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 3/6] dmadev: add data " Chengwen Feng
@ 2021-10-09  9:33   ` Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 6/6] app/test: add dmadev API test Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-09  9:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds multi-process support for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 176 ++++++++++++++++++++-----
 lib/dmadev/rte_dmadev_pmd.h            |  29 +++-
 3 files changed, 163 insertions(+), 43 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d1d7abf694..af32fce1ed 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -145,6 +145,7 @@ New Features
 
   * Device allocation functions.
   * Control and data plane API.
+  * Multi-process support.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 891ceeb988..2dba676b2b 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -19,6 +19,13 @@ static int16_t dma_devices_max;
 
 struct rte_dma_fp_object *rte_dma_fp_objs;
 struct rte_dma_dev *rte_dma_devices;
+static struct {
+	/* Hold the dev_max information of the primary process. This field is
+	 * set by the primary process and is read by the secondary process.
+	 */
+	int16_t dev_max;
+	struct rte_dma_dev_data data[0];
+} *dma_devices_shared_data;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
 #define RTE_DMA_LOG(level, ...) \
@@ -70,11 +77,11 @@ dma_find_free_id(void)
 {
 	int16_t i;
 
-	if (rte_dma_devices == NULL)
+	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
 		return -1;
 
 	for (i = 0; i < dma_devices_max; i++) {
-		if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
+		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
 			return i;
 	}
 
@@ -91,7 +98,7 @@ dma_find_by_name(const char *name)
 
 	for (i = 0; i < dma_devices_max; i++) {
 		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
-		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
 			return &rte_dma_devices[i];
 	}
 
@@ -147,23 +154,71 @@ dma_dev_data_prepare(void)
 	return 0;
 }
 
+static int
+dma_shared_data_prepare(void)
+{
+	const char *mz_name = "rte_dma_dev_data";
+	const struct rte_memzone *mz;
+	size_t size;
+
+	if (dma_devices_shared_data != NULL)
+		return 0;
+
+	size = sizeof(*dma_devices_shared_data) +
+		sizeof(struct rte_dma_dev_data) * dma_devices_max;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
+	else
+		mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL)
+		return -ENOMEM;
+
+	dma_devices_shared_data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dma_devices_shared_data, 0, size);
+		dma_devices_shared_data->dev_max = dma_devices_max;
+	} else {
+		dma_devices_max = dma_devices_shared_data->dev_max;
+	}
+
+	return 0;
+}
+
 static int
 dma_data_prepare(void)
 {
 	int ret;
 
-	if (dma_devices_max == 0)
-		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
-
-	ret = dma_fp_data_prepare();
-	if (ret)
-		return ret;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (dma_devices_max == 0)
+			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+	} else {
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+	}
 
-	return dma_dev_data_prepare();
+	return 0;
 }
 
 static struct rte_dma_dev *
-dma_allocate(const char *name, int numa_node, size_t private_data_size)
+dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
 {
 	struct rte_dma_dev *dev;
 	void *dev_private;
@@ -197,10 +252,54 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 	}
 
 	dev = &rte_dma_devices[dev_id];
-	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
-	dev->dev_id = dev_id;
-	dev->numa_node = numa_node;
-	dev->dev_private = dev_private;
+	dev->data = &dma_devices_shared_data->data[dev_id];
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+	dev->data->dev_id = dev_id;
+	dev->data->numa_node = numa_node;
+	dev->data->dev_private = dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_attach_secondary(const char *name)
+{
+	struct rte_dma_dev *dev;
+	int16_t i;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == dma_devices_max) {
+		RTE_DMA_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[i];
+	dev->data = &dma_devices_shared_data->data[i];
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dma_allocate_primary(name, numa_node, private_data_size);
+	else
+		dev = dma_attach_secondary(name);
 
 	return dev;
 }
@@ -208,7 +307,11 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 static void
 dma_release(struct rte_dma_dev *dev)
 {
-	rte_free(dev->dev_private);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rte_free(dev->data->dev_private);
+		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
+	}
+
 	memset(dev, 0, sizeof(struct rte_dma_dev));
 }
 
@@ -242,7 +345,7 @@ rte_dma_pmd_release(const char *name)
 		return -EINVAL;
 
 	if (dev->state == RTE_DMA_DEV_READY)
-		return rte_dma_close(dev->dev_id);
+		return rte_dma_close(dev->data->dev_id);
 
 	dma_release(dev);
 	return 0;
@@ -260,7 +363,7 @@ rte_dma_get_dev_id_by_name(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
-	return dev->dev_id;
+	return dev->data->dev_id;
 }
 
 bool
@@ -305,7 +408,7 @@ rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
 		return ret;
 
 	dev_info->numa_node = dev->device->numa_node;
-	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
 
 	return 0;
 }
@@ -320,7 +423,7 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -352,7 +455,8 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
 					     sizeof(struct rte_dma_conf));
 	if (ret == 0)
-		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+		memcpy(&dev->data->dev_conf, dev_conf,
+		       sizeof(struct rte_dma_conf));
 
 	return ret;
 }
@@ -368,12 +472,12 @@ rte_dma_start(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
 		return 0;
 	}
@@ -387,7 +491,7 @@ rte_dma_start(int16_t dev_id)
 
 mark_started:
 	dma_fp_object_setup(dev_id, dev);
-	dev->dev_started = 1;
+	dev->data->dev_started = 1;
 	return 0;
 }
 
@@ -400,7 +504,7 @@ rte_dma_stop(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_started == 0) {
+	if (dev->data->dev_started == 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
 		return 0;
 	}
@@ -414,7 +518,7 @@ rte_dma_stop(int16_t dev_id)
 
 mark_stopped:
 	dma_fp_object_reset(dev_id);
-	dev->dev_started = 0;
+	dev->data->dev_started = 0;
 	return 0;
 }
 
@@ -428,7 +532,7 @@ rte_dma_close(int16_t dev_id)
 		return -EINVAL;
 
 	/* Device must be stopped before it can be closed */
-	if (dev->dev_started == 1) {
+	if (dev->data->dev_started == 1) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped before closing", dev_id);
 		return -EBUSY;
@@ -454,7 +558,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 	if (!rte_dma_is_valid(dev_id) || conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -466,7 +570,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
 		return -EINVAL;
 	}
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
@@ -540,7 +644,7 @@ rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
 	if (!rte_dma_is_valid(dev_id) || stats == NULL)
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -561,7 +665,7 @@ rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -634,14 +738,14 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 	}
 
 	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
-		dev->dev_id,
-		dev->dev_name,
-		dev->dev_started ? "started" : "stopped");
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
 	dma_dump_capability(f, dev_info.dev_capa);
 	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
 	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
 	(void)fprintf(f, "  silent_mode: %s\n",
-		dev->dev_conf.enable_silent ? "on" : "off");
+		dev->data->dev_conf.enable_silent ? "on" : "off");
 
 	if (dev->dev_ops->dev_dump != NULL)
 		return (*dev->dev_ops->dev_dump)(dev, f);
@@ -724,7 +828,7 @@ dma_fp_object_setup(int16_t dev_id, const struct rte_dma_dev *dev)
 {
 	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
 
-	obj->dev_private = dev->dev_private;
+	obj->dev_private = dev->data->dev_private;
 	if (dev->dev_ops->copy)
 		obj->copy = dev->dev_ops->copy;
 	if (dev->dev_ops->copy_sg)
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index 07056b45e7..c2902eddd9 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -83,6 +83,27 @@ struct rte_dma_dev_ops {
 	rte_dma_completed_t        completed;
 	rte_dma_completed_status_t completed_status;
 };
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ *
+ * @see struct rte_dma_dev::data
+ */
+struct rte_dma_dev_data {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	void *dev_private; /**< PMD-specific private data. */
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
+	__extension__
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
 /**
  * Possible states of a DMA device.
  *
@@ -101,18 +122,12 @@ enum rte_dma_dev_state {
  * The generic data structure associated with each DMA device.
  */
 struct rte_dma_dev {
-	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
-	int16_t dev_id; /**< Device [external] identifier. */
-	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
-	void *dev_private; /**< PMD-specific private data. */
 	/** Device info which supplied during device initialization. */
 	struct rte_device *device;
+	struct rte_dma_dev_data *data; /**< Pointer to shared device data. */
 	/** Functions implemented by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
-	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
-	__extension__
-	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread
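
The reserve-or-lookup idiom used by dma_shared_data_prepare() above is the
standard DPDK pattern for sharing a table between primary and secondary
processes. A condensed sketch of just that idiom, using a hypothetical
structure and memzone name:

#include <string.h>

#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_memzone.h>

struct demo_shared { int16_t nb_entries; };	/* illustrative only */

static struct demo_shared *
demo_shared_attach(void)
{
	const struct rte_memzone *mz;

	/* The primary creates the zone; secondaries attach to it by name. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		mz = rte_memzone_reserve("demo_shared",
					 sizeof(struct demo_shared),
					 rte_socket_id(), 0);
	else
		mz = rte_memzone_lookup("demo_shared");
	if (mz == NULL)
		return NULL;

	/* Only the primary initializes the shared contents. */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(mz->addr, 0, sizeof(struct demo_shared));

	return mz->addr;
}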

* [dpdk-dev] [PATCH v24 5/6] dma/skeleton: introduce skeleton dmadev driver
  2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 4/6] dmadev: add multi-process support Chengwen Feng
@ 2021-10-09  9:33   ` Chengwen Feng
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 6/6] app/test: add dmadev API test Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-09  9:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The skeleton dmadev driver, along the lines of the rawdev skeleton, is for
showcasing the dmadev library.

The skeleton's design involves a virtual device which is plugged into the
VDEV bus on initialization.

Also, enable compilation of the dmadev skeleton driver.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   1 +
 drivers/dma/meson.build                |   4 +-
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 571 +++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  61 +++
 drivers/dma/skeleton/version.map       |   3 +
 6 files changed, 646 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 119cfaa04e..ec887ac49f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -457,6 +457,7 @@ F: doc/guides/regexdevs/features/default.ini
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: drivers/dma/skeleton/
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
index a24c56d8ff..d9c7ede32f 100644
--- a/drivers/dma/meson.build
+++ b/drivers/dma/meson.build
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright 2021 HiSilicon Limited
 
-drivers = []
+drivers = [
+        'skeleton',
+]
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000000..8871b80956
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000000..62b1eef2d5
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances, currently only 1 is supported. */
+static uint16_t skeldma_count;
+
+static int
+skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	32
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+			     RTE_DMA_CAPA_SVA |
+			     RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
+		  uint32_t conf_sz)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	RTE_SET_USED(conf_sz);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dma_dev *dev = param;
+	struct skeldma_hw *hw = dev->data->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, including:
+	 * 1) flush the pending/running/completed rings to the empty ring.
+	 * 2) init the ring idx to zero.
+	 * 3) init the running statistics.
+	 * 4) mark the cpucopy task exit_flag as false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Set thread affinity lcore = %d fail!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skelteon_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a free ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dma_dev *dev)
+{
+	/* The device is already stopped. */
+	vchan_release(dev->data->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf,
+		    uint32_t conf_sz)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(conf_sz);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of desc must be power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
+		  struct rte_dma_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	(void)fprintf(f,
+		"    lcore_id: %d\n"
+		"    socket_id: %d\n"
+		"    desc_empty_ring_count: %u\n"
+		"    desc_pending_ring_count: %u\n"
+		"    desc_running_ring_count: %u\n"
+		"    desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	(void)fprintf(f,
+		"    next_ring_idx: %u\n"
+		"    submitted_count: %" PRIu64 "\n"
+		"    completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(void *dev_private, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(flags);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(void *dev_private, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(void *dev_private,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(void *dev_private,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dma_dev_ops skeldma_ops = {
+	.dev_info_get     = skeldma_info_get,
+	.dev_configure    = skeldma_configure,
+	.dev_start        = skeldma_start,
+	.dev_stop         = skeldma_stop,
+	.dev_close        = skeldma_close,
+
+	.vchan_setup      = skeldma_vchan_setup,
+
+	.stats_get        = skeldma_stats_get,
+	.stats_reset      = skeldma_stats_reset,
+
+	.dev_dump         = skeldma_dump,
+
+	.copy             = skeldma_copy,
+	.submit           = skeldma_submit,
+	.completed        = skeldma_completed,
+	.completed_status = skeldma_completed_status,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dma_dev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	dev->device = &vdev->device;
+	dev->dev_ops = &skeldma_ops;
+
+	hw = dev->data->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	dev->state = RTE_DMA_DEV_READY;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	return rte_dma_pmd_release(name);
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multiple process not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_count > 0) {
+		SKELDMA_LOG(ERR, "Multiple instance not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		skeldma_count = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_count = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000000..eaa52364bf
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef SKELETON_DMADEV_H
+#define SKELETON_DMADEV_H
+
+#include <pthread.h>
+
+#include <rte_ring.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* SKELETON_DMADEV_H */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000000..c2e0723b4c
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread
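
Since the skeleton is a vdev, it can also be instantiated from application
code rather than from the EAL command line. A minimal sketch, assuming EAL is
already initialized; the 'lcore=3' argument pins the cpucopy thread, as
parsed by skeldma_parse_vdev_args():

#include <rte_bus_vdev.h>
#include <rte_dmadev.h>

static int
create_skeleton_dma(void)
{
	int id;

	/* Create one instance, pinning its copy thread to lcore 3. */
	if (rte_vdev_init("dma_skeleton", "lcore=3") < 0)
		return -1;

	/* Resolve the dmadev id for use with rte_dma_configure() etc. */
	id = rte_dma_get_dev_id_by_name("dma_skeleton");
	if (id < 0)
		rte_vdev_uninit("dma_skeleton");

	return id;
}

The equivalent on the EAL command line would be --vdev=dma_skeleton,lcore=3.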

* [dpdk-dev] [PATCH v24 6/6] app/test: add dmadev API test
  2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-10-09  9:33   ` Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-09  9:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the test
framework.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                |   1 +
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  41 +++
 app/test/test_dmadev_api.c | 574 +++++++++++++++++++++++++++++++++++++
 app/test/test_dmadev_api.h |   5 +
 5 files changed, 625 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 app/test/test_dmadev_api.h

diff --git a/MAINTAINERS b/MAINTAINERS
index ec887ac49f..d329873465 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -458,6 +458,7 @@ DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
 F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/app/test/meson.build b/app/test/meson.build
index f144d8b8ed..a16374b7a1 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -44,6 +44,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -163,6 +165,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -334,6 +337,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000000..45da6b76fe
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+#include "test_dmadev_api.h"
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dma_get_dev_id_by_name(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dma_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dma(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000000..4a181af90a
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dma_api(uint16_t dev_id);
+
+#define DMA_TEST_API_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static int16_t test_dev_id;
+static int16_t invalid_dev_id;
+
+static char *src;
+static char *dst;
+
+static int total;
+static int passed;
+static int failed;
+
+static int
+testsuite_setup(int16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = -1;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL) {
+		rte_free(src);
+		src = NULL;
+		return -ENOMEM;
+	}
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	/* Set dmadev log level to critical to suppress unnecessary output
+	 * during API tests.
+	 */
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	src = NULL;
+	rte_free(dst);
+	dst = NULL;
+	/* Ensure the dmadev is stopped. */
+	rte_dma_stop(test_dev_id);
+
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dma_get_dev_id_by_name(void)
+{
+	int ret = rte_dma_get_dev_id_by_name("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dma_is_valid(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dma_is_valid(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_count(void)
+{
+	uint16_t count = rte_dma_count_avail();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_info_get(void)
+{
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_configure(void)
+{
+	struct rte_dma_conf conf = { 0 };
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configure nb_vchans not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+check_direction(void)
+{
+	struct rte_dma_vchan_conf vchan_conf;
+	int ret;
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return 0;
+}
+
+static int
+check_port_type(struct rte_dma_info *dev_info)
+{
+	struct rte_dma_vchan_conf vchan_conf;
+	int ret;
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info->min_desc;
+	vchan_conf.src_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info->min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return 0;
+}
+
+static int
+test_dma_vchan_setup(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configure success */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_vchan_setup(test_dev_id, dev_conf.nb_vchans, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	ret = check_direction();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check direction");
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check port type */
+	ret = check_port_type(&dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check port type");
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_start_stop(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup when device started */
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret);
+
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_stats(void)
+{
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dma_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, RTE_DMA_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, RTE_DMA_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+	ret = rte_dma_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static void
+setup_memory(void)
+{
+	int i;
+
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+}
+
+static int
+verify_memory(void)
+{
+	int i;
+
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] == dst[i])
+			continue;
+		RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+			"Failed to copy memory, %d %d", src[i], dst[i]);
+	}
+
+	return 0;
+}
+
+static int
+test_dma_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	setup_memory();
+
+	/* Check enqueue without submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Expected no completions before submit");
+
+	/* Check explicit submit */
+	ret = rte_dma_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	ret = verify_memory();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	setup_memory();
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	ret = verify_memory();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Unexpected completion status, %d", status[i]);
+
+	/* Check completed status again */
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Expected no further completions");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Unexpected completion status, %d", status[i]);
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dma_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup failed!\n");
+		return -1;
+	}
+
+	/* Each testcase that exits successfully must ensure that the test
+	 * dmadev still exists and is left in the stopped state.
+	 */
+	DMA_TEST_API_RUN(test_dma_get_dev_id_by_name);
+	DMA_TEST_API_RUN(test_dma_is_valid_dev);
+	DMA_TEST_API_RUN(test_dma_count);
+	DMA_TEST_API_RUN(test_dma_info_get);
+	DMA_TEST_API_RUN(test_dma_configure);
+	DMA_TEST_API_RUN(test_dma_vchan_setup);
+	DMA_TEST_API_RUN(test_dma_start_stop);
+	DMA_TEST_API_RUN(test_dma_stats);
+	DMA_TEST_API_RUN(test_dma_dump);
+	DMA_TEST_API_RUN(test_dma_completed);
+	DMA_TEST_API_RUN(test_dma_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
diff --git a/app/test/test_dmadev_api.h b/app/test/test_dmadev_api.h
new file mode 100644
index 0000000000..33fbc5bd41
--- /dev/null
+++ b/app/test/test_dmadev_api.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+int test_dma_api(uint16_t dev_id);
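As a usage sketch, the entry point declared above could be driven by a test
runner roughly as follows (illustrative only; the real test_dmadev.c added in
this series wires it into the DPDK test framework):

#include <rte_dmadev.h>
#include "test_dmadev_api.h"

/* Illustrative only: run the API suite against the first device slot. */
static int
run_dma_api_suite(void)
{
	int16_t dev_id = 0; /* assume the first dmadev slot is in use */

	if (!rte_dma_is_valid(dev_id))
		return -1; /* no dmadev available to test */

	return test_dma_api((uint16_t)dev_id);
}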
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v24 3/6] dmadev: add data plane API support
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 3/6] dmadev: add data " Chengwen Feng
@ 2021-10-09 10:03     ` fengchengwen
  2021-10-11 10:40     ` Bruce Richardson
  1 sibling, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-10-09 10:03 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch follows Konstantin's idea and introduces rte_dma_fp_object to hide
implementation details.
This change modifies the first parameter of the drivers' data plane interface:
from 'struct rte_dma_dev *dev' to 'void *dev_private'.
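A minimal sketch of the idea (the field set shown here is illustrative, not
the final layout of the structure):

#include <stdint.h>
#include <rte_common.h> /* for rte_iova_t */

/* Illustrative only: a per-device table of fast-path ops, so the public
 * inline wrappers never dereference struct rte_dma_dev directly.
 */
typedef int (*rte_dma_copy_t)(void *dev_private, uint16_t vchan,
			      rte_iova_t src, rte_iova_t dst,
			      uint32_t length, uint64_t flags);

struct rte_dma_fp_object {
	/* Passed as the first argument to every data plane op. */
	void *dev_private;
	rte_dma_copy_t copy;
	/* ... copy_sg, fill, submit, completed, completed_status ... */
};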

On 2021/10/9 17:33, Chengwen Feng wrote:
> This patch add data plane API for dmadev.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> ---
>  doc/guides/prog_guide/dmadev.rst       |  22 ++
>  doc/guides/rel_notes/release_21_11.rst |   2 +-
>  lib/dmadev/meson.build                 |   1 +
>  lib/dmadev/rte_dmadev.c                | 134 ++++++++
>  lib/dmadev/rte_dmadev.h                | 451 +++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_core.h           |  78 +++++
>  lib/dmadev/rte_dmadev_pmd.h            |   7 +
>  lib/dmadev/version.map                 |   6 +
>  8 files changed, 700 insertions(+), 1 deletion(-)
>  create mode 100644 lib/dmadev/rte_dmadev_core.h
> 

[snip]


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v25 0/6] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (27 preceding siblings ...)
  2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
@ 2021-10-11  7:33 ` Chengwen Feng
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library Chengwen Feng
                     ` (5 more replies)
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
  29 siblings, 6 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-11  7:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch set contains six patches adding the new dmadev library.

Chengwen Feng (6):
  dmadev: introduce DMA device library
  dmadev: add control plane API support
  dmadev: add data plane API support
  dmadev: add multi-process support
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test

---
v25:
* fix undefined reference to rte_dma_fp_objs with some compiler suites.
* make rte_dma_dev hold a pointer to rte_dma_fp_objs to avoid
  primary/secondary process inconsistency.
v24:
* use rte_dma_fp_object to hide implementation details.
* support group doxygen for RTE_DMA_CAPA_* and RTE_DMA_OP_*.
* adjusted the naming of some functions.
* fix typo.
v23:
* split multi-process support out of the first patch.
* fix some static check warnings.
* fix the skeleton CPU-thread zero_req_count flip bug.
* add test_dmadev_api.h.
* describe how the dmadev state is updated once initialization succeeds.
v22:
* function prefix changed from rte_dmadev_* to rte_dma_*.
* changed to prefix-style comments in most scenarios.
* dmadev dev_id now uses the int16_t type.
* fix typos.
* organize the patch set incrementally.
v21:
* add comment for reserved fields of struct rte_dmadev.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   41 +
 app/test/test_dmadev_api.c             |  574 +++++++++++++
 app/test/test_dmadev_api.h             |    5 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/dmadevs/index.rst           |   12 +
 doc/guides/index.rst                   |    1 +
 doc/guides/prog_guide/dmadev.rst       |  120 +++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    6 +
 drivers/dma/meson.build                |    6 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  571 +++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   61 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  825 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1048 ++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |   81 ++
 lib/dmadev/rte_dmadev_pmd.h            |  168 ++++
 lib/dmadev/version.map                 |   36 +
 lib/meson.build                        |    1 +
 26 files changed, 3871 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 app/test/test_dmadev_api.h
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library
  2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
@ 2021-10-11  7:33   ` Chengwen Feng
  2021-10-12 19:09     ` Thomas Monjalon
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support Chengwen Feng
                     ` (4 subsequent siblings)
  5 siblings, 1 reply; 339+ messages in thread
From: Chengwen Feng @ 2021-10-11  7:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The 'dmadev' is a generic type of DMA device.

This patch introduces the 'dmadev' device allocation functions.

The infrastructure is prepared to welcome drivers in drivers/dma/.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   5 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/dmadevs/index.rst           |  12 ++
 doc/guides/index.rst                   |   1 +
 doc/guides/prog_guide/dmadev.rst       |  60 ++++++
 doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst        |   1 +
 doc/guides/rel_notes/release_21_11.rst |   4 +
 drivers/dma/meson.build                |   4 +
 drivers/meson.build                    |   1 +
 lib/dmadev/meson.build                 |   6 +
 lib/dmadev/rte_dmadev.c                | 246 +++++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 133 ++++++++++++
 lib/dmadev/rte_dmadev_pmd.h            |  90 ++++++++
 lib/dmadev/version.map                 |  20 ++
 lib/meson.build                        |   1 +
 17 files changed, 869 insertions(+)
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 278e5b3226..119cfaa04e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -454,6 +454,11 @@ F: app/test-regex/
 F: doc/guides/prog_guide/regexdev.rst
 F: doc/guides/regexdevs/features/default.ini
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 Eventdev API
 M: Jerin Jacob <jerinj@marvell.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107a03..2939050431 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -21,6 +21,7 @@ The public API headers are grouped by topics:
   [compressdev]        (@ref rte_compressdev.h),
   [compress]           (@ref rte_comp.h),
   [regexdev]           (@ref rte_regexdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [eventdev]           (@ref rte_eventdev.h),
   [event_eth_rx_adapter]   (@ref rte_event_eth_rx_adapter.h),
   [event_eth_tx_adapter]   (@ref rte_event_eth_tx_adapter.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a0195c6..109ec1f682 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/doc/guides/dmadevs/index.rst b/doc/guides/dmadevs/index.rst
new file mode 100644
index 0000000000..0bce29d766
--- /dev/null
+++ b/doc/guides/dmadevs/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Drivers
+==================
+
+The following is a list of DMA device drivers, which can be used from
+an application through the DMA API.
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 857f0363d3..919825992e 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -21,6 +21,7 @@ DPDK documentation
    compressdevs/index
    vdpadevs/index
    regexdevs/index
+   dmadevs/index
    eventdevs/index
    rawdevs/index
    mempool/index
diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000000..90bda28f33
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining a generic API which
+supports a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA framework supports both physical (hardware) and virtual (software)
+DMA devices, and provides a generic API which allows DMA devices to be managed
+and configured, and DMA operations to be provisioned on a DMA poll mode
+driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller may have multiple hardware DMA channels (aka. hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * The dmadev may create multiple virtual DMA channels, where each virtual DMA
+   channel represents a different transfer context. DMA operation requests
+   must be submitted to a virtual DMA channel. For example, an application
+   could create virtual DMA channel 0 for memory-to-memory transfers and
+   virtual DMA channel 1 for memory-to-device transfers.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI BDF
+(bus/bridge, device, function). Specific physical DMA controllers, like other
+physical devices in DPDK, can be listed using the EAL command line options.
+
+The dmadevs are dynamically allocated by using the function
+``rte_dma_pmd_allocate`` based on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
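As a usage sketch for the two identifiers described above (using only
functions added in this patch; printf output is illustrative):

#include <stdio.h>
#include <rte_dmadev.h>

/* Illustrative only: resolve a dmadev by name and sanity-check the id. */
static int
lookup_dma(const char *name)
{
	int dev_id = rte_dma_get_dev_id_by_name(name);

	if (dev_id < 0 || !rte_dma_is_valid((int16_t)dev_id))
		return -1; /* no such device */

	printf("found dmadev %d (%u devices available)\n",
	       dev_id, rte_dma_count_avail());
	return dev_id;
}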
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000000..157d7eb7dc
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507f46..89af28dacb 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -27,6 +27,7 @@ Programmer's Guide
     cryptodev_lib
     compressdev
     regexdev
+    dmadev
     rte_security
     rawdev
     link_bonding_poll_mode_drv_lib
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 89d4b33ef1..929c0d6113 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -141,6 +141,10 @@ New Features
   * Added tests to validate packets hard expiry.
   * Added tests to verify tunnel header verification in IPsec inbound.
 
+* **Introduced dmadev library with:**
+
+  * Device allocation functions.
+
 
 Removed Items
 -------------
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000000..a24c56d8ff
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2021 HiSilicon Limited
+
+drivers = []
diff --git a/drivers/meson.build b/drivers/meson.build
index 3d08540581..b7d680868a 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000000..f8d54c6e74
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000000..42a4693bd9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+static int16_t dma_devices_max;
+
+struct rte_dma_dev *rte_dma_devices;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
+#define RTE_DMA_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
+		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))
+
+int
+rte_dma_dev_max(size_t dev_max)
+{
+	/* This function may be called before rte_eal_init(), so no rte library
+	 * function can be called in this function.
+	 */
+	if (dev_max == 0 || dev_max > INT16_MAX)
+		return -EINVAL;
+
+	if (dma_devices_max > 0)
+		return -EINVAL;
+
+	dma_devices_max = dev_max;
+
+	return 0;
+}
+
+static int
+dma_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMA_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMA_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
+		RTE_DMA_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int16_t
+dma_find_free_id(void)
+{
+	int16_t i;
+
+	if (rte_dma_devices == NULL)
+		return -1;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
+			return i;
+	}
+
+	return -1;
+}
+
+static struct rte_dma_dev*
+dma_find_by_name(const char *name)
+{
+	int16_t i;
+
+	if (rte_dma_devices == NULL)
+		return NULL;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
+		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+			return &rte_dma_devices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dma_dev_data_prepare(void)
+{
+	size_t size;
+
+	if (rte_dma_devices != NULL)
+		return 0;
+
+	size = dma_devices_max * sizeof(struct rte_dma_dev);
+	rte_dma_devices = malloc(size);
+	if (rte_dma_devices == NULL)
+		return -ENOMEM;
+	memset(rte_dma_devices, 0, size);
+
+	return 0;
+}
+
+static int
+dma_data_prepare(void)
+{
+	if (dma_devices_max == 0)
+		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+	return dma_dev_data_prepare();
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+	void *dev_private;
+	int16_t dev_id;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	dev = dma_find_by_name(name);
+	if (dev != NULL) {
+		RTE_DMA_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	dev_private = rte_zmalloc_socket(name, private_data_size,
+					 RTE_CACHE_LINE_SIZE, numa_node);
+	if (dev_private == NULL) {
+		RTE_DMA_LOG(ERR, "Cannot allocate private data");
+		return NULL;
+	}
+
+	dev_id = dma_find_free_id();
+	if (dev_id < 0) {
+		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
+		rte_free(dev_private);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[dev_id];
+	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
+	dev->dev_id = dev_id;
+	dev->numa_node = numa_node;
+	dev->dev_private = dev_private;
+
+	return dev;
+}
+
+static void
+dma_release(struct rte_dma_dev *dev)
+{
+	rte_free(dev->dev_private);
+	memset(dev, 0, sizeof(struct rte_dma_dev));
+}
+
+struct rte_dma_dev *
+rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0 || private_data_size == 0)
+		return NULL;
+
+	dev = dma_allocate(name, numa_node, private_data_size);
+	if (dev == NULL)
+		return NULL;
+
+	dev->state = RTE_DMA_DEV_REGISTERED;
+
+	return dev;
+}
+
+int
+rte_dma_pmd_release(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find_by_name(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	dma_release(dev);
+	return 0;
+}
+
+int
+rte_dma_get_dev_id_by_name(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find_by_name(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	return dev->dev_id;
+}
+
+bool
+rte_dma_is_valid(int16_t dev_id)
+{
+	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
+		rte_dma_devices != NULL &&
+		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
+}
+
+uint16_t
+rte_dma_count_avail(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	if (rte_dma_devices == NULL)
+		return count;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
+			count++;
+	}
+
+	return count;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000000..87810f2f08
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021 Marvell International Ltd
+ * Copyright(c) 2021 SmartShare Systems
+ */
+
+#ifndef RTE_DMADEV_H
+#define RTE_DMADEV_H
+
+/**
+ * @file rte_dmadev.h
+ *
+ * DMA (Direct Memory Access) device API.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW DMA channel |               | HW DMA channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW DMA Controller |
+ *                           ---------------------
+ *
+ * The DMA controller may have multiple HW-DMA-channels (aka. HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev may create multiple virtual DMA channels, where each virtual DMA
+ * channel represents a different transfer context. DMA operation requests
+ * must be submitted to a virtual DMA channel. For example, an application
+ * could create virtual DMA channel 0 for memory-to-memory transfers and
+ * virtual DMA channel 1 for memory-to-device transfers.
+ *
+ * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ */
+
+#include <stdint.h>
+
+#include <rte_bitops.h>
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum number of devices if rte_dma_dev_max() is not called. */
+#define RTE_DMADEV_DEFAULT_MAX 64
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure the maximum number of dmadevs.
+ * @note This function can be invoked before rte_eal_init() in the primary
+ * process to change the maximum number of dmadevs. If not invoked, the
+ * maximum number of dmadevs defaults to RTE_DMADEV_DEFAULT_MAX.
+ *
+ * @param dev_max
+ *   maximum number of dmadevs.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dev_max(size_t dev_max);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int rte_dma_get_dev_id_by_name(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the dev_id is valid.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - true if the device index is valid, false otherwise.
+ */
+__rte_experimental
+bool rte_dma_is_valid(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t rte_dma_count_avail(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_H */
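To illustrate the intended call order of rte_dma_dev_max() relative to EAL
initialization (a sketch; 128 is an arbitrary example value, error handling
trimmed):

#include <stdio.h>
#include <rte_eal.h>
#include <rte_dmadev.h>

int
main(int argc, char **argv)
{
	/* Raise the dmadev table size; must happen before rte_eal_init()
	 * in the primary process, and only once.
	 */
	if (rte_dma_dev_max(128) != 0)
		return -1;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	printf("%u DMA devices available\n", rte_dma_count_avail());
	return 0;
}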
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000000..bb09382dce
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef RTE_DMADEV_PMD_H
+#define RTE_DMADEV_PMD_H
+
+/**
+ * @file
+ *
+ * DMA Device PMD interface
+ *
+ * Driver facing interface for a DMA device. These are not to be called directly
+ * by any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Possible states of a DMA device.
+ *
+ * @see struct rte_dma_dev::state
+ */
+enum rte_dma_dev_state {
+	RTE_DMA_DEV_UNUSED = 0, /**< Device is unused. */
+	/** Device is registered, but not ready to be used. */
+	RTE_DMA_DEV_REGISTERED,
+	/** Device is ready for use. This is set by the PMD. */
+	RTE_DMA_DEV_READY,
+};
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ */
+struct rte_dma_dev {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	void *dev_private; /**< PMD-specific private data. */
+	/** Device info supplied during device initialization. */
+	struct rte_device *device;
+	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dma_dev *rte_dma_devices;
+
+/**
+ * @internal
+ * Allocate a new dmadev slot for a DMA device and return the pointer to that
+ * slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ * @param numa_node
+ *   Driver's private data's NUMA node.
+ * @param private_data_size
+ *   Driver's private data size.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dma_dev *rte_dma_pmd_allocate(const char *name, int numa_node,
+					 size_t private_data_size);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   - 0 on success, negative on error.
+ */
+__rte_internal
+int rte_dma_pmd_release(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_PMD_H */
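As a sketch of how a driver might use these two helpers (the my_dma_* names
are hypothetical; a real PMD also installs its dev_ops, which later patches in
this series add):

#include <errno.h>
#include "rte_dmadev_pmd.h"

/* Hypothetical driver-private state; the layout is up to the PMD. */
struct my_dma_private {
	uint16_t ring_size;
};

static int
my_dma_probe(struct rte_device *rte_dev, const char *name)
{
	struct rte_dma_dev *dev;

	dev = rte_dma_pmd_allocate(name, rte_dev->numa_node,
				   sizeof(struct my_dma_private));
	if (dev == NULL)
		return -ENOMEM;

	dev->device = rte_dev;
	/* ... hardware init using dev->dev_private would go here ... */
	dev->state = RTE_DMA_DEV_READY;
	return 0;
}

static int
my_dma_remove(const char *name)
{
	return rte_dma_pmd_release(name);
}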
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000000..f8a0076468
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dma_count_avail;
+	rte_dma_dev_max;
+	rte_dma_get_dev_id_by_name;
+	rte_dma_is_valid;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dma_devices;
+	rte_dma_pmd_allocate;
+	rte_dma_pmd_release;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index 9c4841fe40..0bf1f51357 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -45,6 +45,7 @@ libraries = [
         'pdump',
         'rawdev',
         'regexdev',
+        'dmadev',
         'rib',
         'reorder',
         'sched',
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support
  2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library Chengwen Feng
@ 2021-10-11  7:33   ` Chengwen Feng
  2021-10-11 15:44     ` Bruce Richardson
  2021-10-12 18:57     ` Thomas Monjalon
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 3/6] dmadev: add data " Chengwen Feng
                     ` (3 subsequent siblings)
  5 siblings, 2 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-11  7:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds the control plane API for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  38 ++
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 360 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 464 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_pmd.h            |  61 ++++
 lib/dmadev/version.map                 |   9 +
 6 files changed, 933 insertions(+)

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index 90bda28f33..5c70ad3d6a 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -58,3 +58,41 @@ identifiers:
 
 - A device name used to designate the DMA device in console messages, for
   administration or debugging purposes.
+
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dma_configure`` API is used to configure a DMA device.
+
+.. code-block:: c
+
+   int rte_dma_configure(int16_t dev_id,
+                         const struct rte_dma_conf *dev_conf);
+
+The ``rte_dma_conf`` structure is used to pass the configuration parameters
+for the DMA device.
+
+
+Configuration of Virtual DMA Channels
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_dma_vchan_setup`` API is used to configure a virtual DMA channel.
+
+.. code-block:: c
+
+   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+                           const struct rte_dma_vchan_conf *conf);
+
+The ``rte_dma_vchan_conf`` structure is used to pass the configuration
+parameters for the virtual DMA channel.
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dma_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
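Tying the two setup calls documented above together, a minimal mem-to-mem
bring-up might read as follows (a code fragment, assuming dev_id was obtained
as in the Device Identification section; the nb_desc value of 1024 is an
arbitrary choice that must fall within the device's reported
min_desc/max_desc range):

struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
struct rte_dma_vchan_conf vchan_conf = {
	.direction = RTE_DMA_DIR_MEM_TO_MEM,
	.nb_desc = 1024,
};

if (rte_dma_configure(dev_id, &dev_conf) != 0)
	rte_exit(EXIT_FAILURE, "cannot configure dmadev %d\n", dev_id);
if (rte_dma_vchan_setup(dev_id, 0, &vchan_conf) != 0)
	rte_exit(EXIT_FAILURE, "cannot set up vchan 0\n");
if (rte_dma_start(dev_id) != 0)
	rte_exit(EXIT_FAILURE, "cannot start dmadev %d\n", dev_id);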
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 929c0d6113..f935a3f395 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -144,6 +144,7 @@ New Features
 * **Introduced dmadev library with:**
 
   * Device allocation functions.
+  * Control plane API.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 42a4693bd9..a6a5680d2b 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -201,6 +201,9 @@ rte_dma_pmd_release(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
+	if (dev->state == RTE_DMA_DEV_READY)
+		return rte_dma_close(dev->dev_id);
+
 	dma_release(dev);
 	return 0;
 }
@@ -244,3 +247,360 @@ rte_dma_count_avail(void)
 
 	return count;
 }
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dma_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dma_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->numa_node = dev->device->numa_node;
+	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configured with zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > dev_info.max_vchans) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configured with too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dma_conf));
+	if (ret == 0)
+		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+
+	return ret;
+}
+
+int
+rte_dma_start(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (dev->dev_started == 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	/* Device must be stopped before it can be closed */
+	if (dev->dev_started == 1) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_close)(dev);
+	if (ret == 0)
+		dma_release(dev);
+
+	return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= dev_info.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < dev_info.min_desc ||
+	    conf->nb_desc > dev_info.max_desc) {
+		RTE_DMA_LOG(ERR,
+			"Device %d number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dma_vchan_conf));
+}
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	if (!rte_dma_is_valid(dev_id) || stats == NULL)
+		return -EINVAL;
+
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dma_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMA_CAPA_SVA,         "sva"     },
+		{ RTE_DMA_CAPA_SILENT,      "silent"  },
+		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		(void)fprintf(f, " %s", dma_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	(void)fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || f == NULL)
+		return -EINVAL;
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
+		return -EINVAL;
+	}
+
+	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+		dev->dev_id,
+		dev->dev_name,
+		dev->dev_started ? "started" : "stopped");
+	dma_dump_capability(f, dev_info.dev_capa);
+	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
+	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
+	(void)fprintf(f, "  silent_mode: %s\n",
+		dev->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 87810f2f08..34a4c26851 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -48,6 +48,29 @@
  * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
  * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
  *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dma_configure()
+ *     - rte_dma_vchan_setup()
+ *     - rte_dma_start()
+ *
+ * Then, the application can invoke dataplane functions to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
+ * rte_dma_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dma_start() again. The dataplane functions should not
+ * be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the rte_dma_close()
+ * function.
+ *
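+ * A minimal lifecycle sketch (illustrative only: device id 0 and the
+ * configuration values below are assumptions, not requirements):
+ *
+ * \code{.c}
+ * struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
+ * struct rte_dma_vchan_conf qconf = {
+ *     .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ *     .nb_desc = 1024,
+ * };
+ *
+ * if (rte_dma_configure(0, &dev_conf) < 0 ||
+ *     rte_dma_vchan_setup(0, 0, &qconf) < 0 ||
+ *     rte_dma_start(0) < 0)
+ *     rte_exit(EXIT_FAILURE, "dmadev setup failed\n");
+ * \endcode
+ *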
+ * Regarding MT-safety: all dmadev API functions implemented by a PMD are
+ * lock-free and are assumed not to be invoked in parallel on different
+ * logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation because these virtual DMA channels share the same
+ * HW-DMA-channel.
  */
 
 #include <stdint.h>
@@ -126,6 +149,447 @@ bool rte_dma_is_valid(int16_t dev_id);
 __rte_experimental
 uint16_t rte_dma_count_avail(void);
 
+/**@{@name DMA capability
+ * @see struct rte_dma_info::dev_capa
+ */
+#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
+/**< Support memory-to-memory transfer */
+#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
+/**< Support memory-to-device transfer. */
+#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
+/**< Support device-to-memory transfer. */
+#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
+/**< Support device-to-device transfer. */
+#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
+/**< Support SVA, which allows using VA as the DMA address.
+ * If the device supports SVA, the application can pass any VA address,
+ * e.g. memory from rte_malloc(), rte_memzone(), malloc() or the stack.
+ * If the device does not support SVA, the application must pass an IOVA
+ * address, e.g. one obtained from rte_malloc() or rte_memzone().
+ */
+#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
+/**< Support working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dma_completed*() APIs.
+ * @see struct rte_dma_conf::enable_silent
+ */
+#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
+/**< Support copy operation.
+ * The operation capabilities start at bit index 32 so as to leave a gap
+ * between the normal capabilities and the operation capabilities.
+ */
+#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
+/**< Support scatter-gather list copy operation. */
+#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
+/**< Support fill operation. */
+/**@}*/
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ *
+ * @see rte_dma_info_get
+ */
+struct rte_dma_info {
+	/** Device capabilities (RTE_DMA_CAPA_*). */
+	uint64_t dev_capa;
+	/** Maximum number of virtual DMA channels supported. */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/** Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support the COPY_SG capability, this value
+	 * can be zero.
+	 * If the device supports the COPY_SG capability, then the
+	 * rte_dma_copy_sg() parameters nb_src/nb_dst must not exceed this
+	 * value.
+	 */
+	uint16_t max_sges;
+	/** NUMA node connection, -1 if unknown. */
+	int16_t numa_node;
+	/** Number of virtual DMA channels configured. */
+	uint16_t nb_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dma_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
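+
+/* An illustrative sketch of querying device capabilities; 'dev_id' is
+ * assumed to be a valid device identifier:
+ *
+ *     struct rte_dma_info info;
+ *
+ *     if (rte_dma_info_get(dev_id, &info) == 0 &&
+ *         (info.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) != 0)
+ *         printf("SG copy supported, up to %u entries\n", info.max_sges);
+ */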
+
+/**
+ * A structure used to configure a DMA device.
+ *
+ * @see rte_dma_configure
+ */
+struct rte_dma_conf {
+	/** The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dma_info obtained from rte_dma_info_get().
+	 */
+	uint16_t nb_vchans;
+	/** Indicates whether to enable silent mode.
+	 * false: default mode, true: silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMA_CAPA_SILENT
+	 */
+	bool enable_silent;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked before any other function in the API.
+ * It can also be re-invoked when a device is in the stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dma_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_start(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dma_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stop(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_close(int16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+enum rte_dma_direction {
+	/** DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/** DMA transfer direction - from memory to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to the device (the host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/** DMA transfer direction - from device to memory.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from the
+	 * device (the host memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/** DMA transfer direction - from device to device.
+	 * In a typical scenario, an SoC is installed in a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (host memory) to another device (another host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+};
+
+/**
+ * DMA access port type defines.
+ *
+ * @see struct rte_dma_port_param::port_type
+ */
+enum rte_dma_port_type {
+	RTE_DMA_PORT_NONE,
+	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dma_vchan_conf::src_port
+ * @see struct rte_dma_vchan_conf::dst_port
+ */
+struct rte_dma_port_param {
+	/** The device access port type.
+	 *
+	 * @see enum rte_dma_port_type
+	 */
+	enum rte_dma_port_type port_type;
+	RTE_STD_C11
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		__extension__
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			/** The PASID field in the TLP packet. */
+			uint64_t pasid : 20;
+			/** The attributes field in the TLP packet. */
+			uint64_t attr : 3;
+			/** The processing hint field in the TLP packet. */
+			uint64_t ph : 2;
+			/** The steering tag field in the TLP packet. */
+			uint64_t st : 16;
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ *
+ * @see rte_dma_vchan_setup
+ */
+struct rte_dma_vchan_conf {
+	/** Transfer direction
+	 *
+	 * @see enum rte_dma_direction
+	 */
+	enum rte_dma_direction direction;
+	/** Number of descriptors for the virtual DMA channel. */
+	uint16_t nb_desc;
+	/** 1) Used to describe the device access port parameter in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameter in the
+	 * device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param src_port;
+	/** 1) Used to describe the device access port parameter in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameter in
+	 * the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param dst_port;
+};
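+
+/* An illustrative sketch of a device-to-memory vchan configuration over
+ * PCIe (all field values below are assumptions, not recommendations):
+ *
+ *     struct rte_dma_vchan_conf conf = {
+ *         .direction = RTE_DMA_DIR_DEV_TO_MEM,
+ *         .nb_desc = 512,
+ *         .src_port = {
+ *             .port_type = RTE_DMA_PORT_PCIE,
+ *             .pcie = { .coreid = 0, .pfid = 1, .vfen = 0 },
+ *         },
+ *         .dst_port = { .port_type = RTE_DMA_PORT_NONE },
+ *     };
+ */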
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dma_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+			const struct rte_dma_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ *
+ * @see rte_dma_stats_get
+ */
+struct rte_dma_stats {
+	/** Count of operations which were submitted to hardware. */
+	uint64_t submitted;
+	/** Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t completed;
+	/** Count of operations which failed to complete. */
+	uint64_t errors;
+};
+
+/**
+ * Special ID, which is used to represent all virtual DMA channels.
+ *
+ * @see rte_dma_stats_get
+ * @see rte_dma_stats_reset
+ */
+#define RTE_DMA_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMA_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dma_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
+		      struct rte_dma_stats *stats);
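+
+/* An illustrative sketch of reading the aggregate statistics of all
+ * virtual DMA channels ('dev_id' is an assumption):
+ *
+ *     struct rte_dma_stats stats;
+ *
+ *     if (rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &stats) == 0)
+ *         printf("submitted=%" PRIu64 " errors=%" PRIu64 "\n",
+ *                stats.submitted, stats.errors);
+ */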
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMA_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dump(int16_t dev_id, FILE *f);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index bb09382dce..5fcf0f60b8 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -20,6 +20,62 @@
 extern "C" {
 #endif
 
+struct rte_dma_dev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
+				  struct rte_dma_info *dev_info,
+				  uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev,
+				   const struct rte_dma_conf *dev_conf,
+				   uint32_t conf_sz);
+
+/** @internal Used to start a configured device. */
+typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				const struct rte_dma_vchan_conf *conf,
+				uint32_t conf_sz);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev,
+			uint16_t vchan, struct rte_dma_stats *stats,
+			uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
+
+/**
+ * DMA device operations function pointer table.
+ *
+ * @see struct rte_dma_dev::dev_ops
+ */
+struct rte_dma_dev_ops {
+	rte_dma_info_get_t         dev_info_get;
+	rte_dma_configure_t        dev_configure;
+	rte_dma_start_t            dev_start;
+	rte_dma_stop_t             dev_stop;
+	rte_dma_close_t            dev_close;
+
+	rte_dma_vchan_setup_t      vchan_setup;
+
+	rte_dma_stats_get_t        stats_get;
+	rte_dma_stats_reset_t      stats_reset;
+
+	rte_dma_dump_t             dev_dump;
+};
 /**
  * Possible states of a DMA device.
  *
@@ -44,7 +100,12 @@ struct rte_dma_dev {
 	void *dev_private; /**< PMD-specific private data. */
 	/** Device info which supplied during device initialization. */
 	struct rte_device *device;
+	/** Functions implemented by PMD. */
+	const struct rte_dma_dev_ops *dev_ops;
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	__extension__
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index f8a0076468..e925dfcd6d 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -1,10 +1,19 @@
 EXPERIMENTAL {
 	global:
 
+	rte_dma_close;
+	rte_dma_configure;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
+	rte_dma_dump;
 	rte_dma_get_dev_id_by_name;
+	rte_dma_info_get;
 	rte_dma_is_valid;
+	rte_dma_start;
+	rte_dma_stats_get;
+	rte_dma_stats_reset;
+	rte_dma_stop;
+	rte_dma_vchan_setup;
 
 	local: *;
 };
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v25 3/6] dmadev: add data plane API support
  2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library Chengwen Feng
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support Chengwen Feng
@ 2021-10-11  7:33   ` Chengwen Feng
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 4/6] dmadev: add multi-process support Chengwen Feng
                     ` (2 subsequent siblings)
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-11  7:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds the data plane API for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  22 ++
 doc/guides/rel_notes/release_21_11.rst |   2 +-
 lib/dmadev/meson.build                 |   1 +
 lib/dmadev/rte_dmadev.c                | 112 ++++++
 lib/dmadev/rte_dmadev.h                | 451 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |  81 +++++
 lib/dmadev/rte_dmadev_pmd.h            |   2 +
 lib/dmadev/version.map                 |   7 +
 8 files changed, 677 insertions(+), 1 deletion(-)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index 5c70ad3d6a..2e2a4bb62a 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -96,3 +96,25 @@ can be used to get the device info and supported features.
 
 Silent mode is a special device capability which does not require the
 application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dma_copy`` and ``rte_dma_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dma_submit`` API is used to issue the doorbell to hardware.
+Alternatively, the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the
+enqueue APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs, ``rte_dma_completed`` and
+``rte_dma_completed_status``, which are used to obtain the results of the
+enqueue requests. ``rte_dma_completed`` will return the number of successfully
+completed operations. ``rte_dma_completed_status`` will return the number of
+completed operations along with the status of each operation (filled into the
+``status`` array passed by the user). These two APIs can also return the last
+completed operation's ``ring_idx``, which can help users track operations
+within their own application-defined rings.
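+
+A minimal polling sketch (illustrative only; ``dev_id``, ``vchan`` and the
+buffer addresses are assumptions):
+
+.. code-block:: c
+
+   int idx = rte_dma_copy(dev_id, vchan, src_iova, dst_iova, len,
+                          RTE_DMA_OP_FLAG_SUBMIT);
+   if (idx >= 0) {
+       uint16_t last_idx;
+       bool error;
+
+       /* poll until the single enqueued operation completes */
+       while (rte_dma_completed(dev_id, vchan, 1, &last_idx, &error) == 0)
+           ;
+   }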
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index f935a3f395..d1d7abf694 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -144,7 +144,7 @@ New Features
 * **Introduced dmadev library with:**
 
   * Device allocation functions.
-  * Control plane API.
+  * Control and data plane API.
 
 
 Removed Items
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f8d54c6e74..d2fc85e8c7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,4 +3,5 @@
 
 sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index a6a5680d2b..4080ba63bd 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -17,6 +17,7 @@
 
 static int16_t dma_devices_max;
 
+struct rte_dma_fp_object *rte_dma_fp_objs;
 struct rte_dma_dev *rte_dma_devices;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
@@ -97,6 +98,38 @@ dma_find_by_name(const char *name)
 	return NULL;
 }
 
+static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);
+
+static int
+dma_fp_data_prepare(void)
+{
+	size_t size;
+	void *ptr;
+	int i;
+
+	if (rte_dma_fp_objs != NULL)
+		return 0;
+
+	/* Fast-path objects must be cache-line aligned, but the return value
+	 * of malloc() may not be aligned to the cache line, so extra memory
+	 * is allocated to allow realignment.
+	 * Note: we do not call posix_memalign()/aligned_alloc() because their
+	 * availability is libc-version dependent.
+	 */
+	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
+		RTE_CACHE_LINE_SIZE;
+	ptr = malloc(size);
+	if (ptr == NULL)
+		return -ENOMEM;
+	memset(ptr, 0, size);
+
+	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
+	for (i = 0; i < dma_devices_max; i++)
+		dma_fp_object_dummy(&rte_dma_fp_objs[i]);
+
+	return 0;
+}
+
 static int
 dma_dev_data_prepare(void)
 {
@@ -117,8 +150,15 @@ dma_dev_data_prepare(void)
 static int
 dma_data_prepare(void)
 {
+	int ret;
+
 	if (dma_devices_max == 0)
 		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+
+	ret = dma_fp_data_prepare();
+	if (ret)
+		return ret;
+
 	return dma_dev_data_prepare();
 }
 
@@ -161,6 +201,8 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 	dev->dev_id = dev_id;
 	dev->numa_node = numa_node;
 	dev->dev_private = dev_private;
+	dev->fp_obj = &rte_dma_fp_objs[dev_id];
+	dma_fp_object_dummy(dev->fp_obj);
 
 	return dev;
 }
@@ -169,6 +211,7 @@ static void
 dma_release(struct rte_dma_dev *dev)
 {
 	rte_free(dev->dev_private);
+	dma_fp_object_dummy(dev->fp_obj);
 	memset(dev, 0, sizeof(struct rte_dma_dev));
 }
 
@@ -604,3 +647,72 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 
 	return 0;
 }
+
+static int
+dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
+	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	      __rte_unused const struct rte_dma_sge *src,
+	      __rte_unused const struct rte_dma_sge *dst,
+	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
+	      __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
+	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
+{
+	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
+	return -EINVAL;
+}
+
+static uint16_t
+dummy_completed(__rte_unused void *dev_private,	__rte_unused uint16_t vchan,
+		__rte_unused const uint16_t nb_cpls,
+		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
+{
+	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
+	return 0;
+}
+
+static uint16_t
+dummy_completed_status(__rte_unused void *dev_private,
+		       __rte_unused uint16_t vchan,
+		       __rte_unused const uint16_t nb_cpls,
+		       __rte_unused uint16_t *last_idx,
+		       __rte_unused enum rte_dma_status_code *status)
+{
+	RTE_DMA_LOG(ERR,
+		    "completed_status is not configured or not supported.");
+	return 0;
+}
+
+static void
+dma_fp_object_dummy(struct rte_dma_fp_object *obj)
+{
+	obj->dev_private      = NULL;
+	obj->copy             = dummy_copy;
+	obj->copy_sg          = dummy_copy_sg;
+	obj->fill             = dummy_fill;
+	obj->submit           = dummy_submit;
+	obj->completed        = dummy_completed;
+	obj->completed_status = dummy_completed_status;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 34a4c26851..95b6a0a810 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -65,6 +65,77 @@
  * Finally, an application can close a dmadev by invoking the rte_dma_close()
  * function.
  *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dma_copy()
+ *     - rte_dma_copy_sg()
+ *     - rte_dma_fill()
+ *     - rte_dma_submit()
+ *
+ * These APIs can work with different virtual DMA channels, which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to the virtual
+ * DMA channel; if the submission is successful, a non-negative
+ * ring_idx <= UINT16_MAX is returned, otherwise a negative number is returned.
+ *
+ * The last API is used to issue the doorbell to hardware; alternatively, the
+ * flags parameter (@see RTE_DMA_OP_FLAG_SUBMIT) of the first three APIs can do
+ * the same work.
+ * @note When enqueuing a set of jobs to the device, having a separate submit
+ * outside a loop makes for clearer code than having a check for the last
+ * iteration inside the loop to set a special submit flag.  However, for cases
+ * where one item alone is to be submitted or there is a small set of jobs to
+ * be submitted sequentially, having a submit flag provides a lower-overhead
+ * way of doing the submission while still keeping the code clean.
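+ *
+ * An illustrative sketch of the two submission styles (all variable names
+ * are assumptions):
+ *
+ * \code{.c}
+ * for (i = 0; i < nb_jobs; i++)
+ *     rte_dma_copy(dev_id, vchan, src[i], dst[i], len, 0);
+ * rte_dma_submit(dev_id, vchan);  // one doorbell for the whole batch
+ *
+ * // single job: inline doorbell via the submit flag
+ * rte_dma_copy(dev_id, vchan, src0, dst0, len, RTE_DMA_OP_FLAG_SUBMIT);
+ * \endcode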
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dma_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dma_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT),
+ * the application does not need to invoke the above two completed APIs.
+ *
+ * The ring_idx returned by the enqueue APIs (e.g. rte_dma_copy(),
+ * rte_dma_fill()) follows these rules:
+ *     - The ring_idx values of each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring (see the sketch
+ *       after the example steps below).
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the returned ring_idx is 0
+ *     - step-3: enqueue a copy operation again, the returned ring_idx is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the returned ring_idx is 65535
+ *     - step-x+1: enqueue a copy operation, the returned ring_idx is 0
+ *     - ...
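+ *
+ * A sketch of per-operation metadata tracking via ring_idx (the ring size
+ * 65536 matches the wrap period; the names below are assumptions):
+ *
+ * \code{.c}
+ * static struct job_meta meta[65536];
+ * int idx = rte_dma_copy(dev_id, vchan, src, dst, len, 0);
+ * if (idx >= 0)
+ *     meta[idx] = cur_meta; // looked up again on completion via last_idx
+ * \endcode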
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dma_copy(),
+ * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type.
+ *
+ * The dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory address
+ * can be any VA address, otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
  * Regarding MT-safety: all dmadev API functions implemented by a PMD are
  * lock-free and are assumed not to be invoked in parallel on different
  * logical cores to work on the same target dmadev object.
@@ -590,6 +661,386 @@ int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 int rte_dma_dump(int16_t dev_id, FILE *f);
 
+/**
+ * DMA transfer result status code defines.
+ *
+ * @see rte_dma_completed_status
+ */
+enum rte_dma_status_code {
+	/** The operation completed successfully. */
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/** The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user can modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort this
+	 * job), which allows outstanding requests to complete as far as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_USER_ABORT,
+	/** The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch are not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed, though, so the status of the not-attempted jobs is
+	 * reported before that of those newer completed jobs.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/** The operation failed to complete due to an invalid source
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/** The operation failed to complete due to an invalid destination
+	 * address.
+	 */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/** The operation failed to complete due to an invalid source or
+	 * destination address; covers the case where only an address error is
+	 * known, but not which address was in error.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/** The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/** The operation failed to complete due to an invalid opcode.
+	 * A DMA descriptor can have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/** The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/** The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/** The operation failed to complete due to a bus error; covers the
+	 * case where only a bus error is known, but not the direction of the
+	 * error.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/** The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/** The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/** The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device, device-to-memory,
+	 * or device-to-device transfer scenario.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/** The operation failed to complete due to a page fault during
+	 * address lookup.
+	 */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/** The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+};
+
+/**
+ * A structure used to hold a scatter-gather DMA operation request entry.
+ *
+ * @see rte_dma_copy_sg
+ */
+struct rte_dma_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
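+
+/* An illustrative sketch of building scatter-gather lists for
+ * rte_dma_copy_sg(); the IOVA values are assumptions, and the source and
+ * destination total lengths are kept equal (128 bytes each side):
+ *
+ *     struct rte_dma_sge src[2] = {
+ *         { .addr = iova_a, .length = 64 },
+ *         { .addr = iova_b, .length = 64 },
+ *     };
+ *     struct rte_dma_sge dst[1] = { { .addr = iova_c, .length = 128 } };
+ *
+ *     rte_dma_copy_sg(dev_id, vchan, src, dst, 2, 1, RTE_DMA_OP_FLAG_SUBMIT);
+ */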
+
+#include "rte_dmadev_core.h"
+
+/**@{@name DMA operation flag
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+#define RTE_DMA_OP_FLAG_FENCE	RTE_BIT64(0)
+/**< Fence flag.
+ * An operation with this flag must be processed only after all previous
+ * operations have completed.
+ * If the specified DMA HW works in-order (i.e. it has an implicit fence
+ * between operations), this flag can be a NOP.
+ */
+#define RTE_DMA_OP_FLAG_SUBMIT	RTE_BIT64(1)
+/**< Submit flag.
+ * An operation with this flag issues the doorbell to hardware after the job
+ * is enqueued.
+ */
+#define RTE_DMA_OP_FLAG_LLC	RTE_BIT64(2)
+/**< Hint to write data to low-level cache.
+ * Used for performance optimization; this is just a hint and there is no
+ * capability bit for it, so a driver must not return an error if this flag
+ * is set.
+ */
+/**@}*/
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation, otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy, -ENOTSUP);
+#endif
+
+	return (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is triggered to begin this operation, otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The pointer of source scatter-gather entry array.
+ * @param dst
+ *   The pointer of destination scatter-gather entry array.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
+		struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
+	    nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy_sg, -ENOTSUP);
+#endif
+
+	return (*obj->copy_sg)(obj->dev_private, vchan, src, dst, nb_src,
+			       nb_dst, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation, otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
+	     rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->fill, -ENOTSUP);
+#endif
+
+	return (*obj->fill)(obj->dev_private, vchan, pattern, dst, length,
+			    flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dma_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+static inline int
+rte_dma_submit(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->submit, -ENOTSUP);
+#endif
+
+	return (*obj->submit)(obj->dev_private, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates whether a transfer error occurred.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile-time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx,
+				 has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return the number of operations that have been completed; the completed
+ * operations may have succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number (call it n) is greater than zero, then the first n values
+ *   in the status array are also set.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
+			 const uint16_t nb_cpls, uint16_t *last_idx,
+			 enum rte_dma_status_code *status)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*obj->completed_status)(obj->dev_private, vchan, nb_cpls,
+					last_idx, status);
+}
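+
+/* An illustrative sketch of draining completions with per-operation status
+ * ('dev_id', 'vchan' and the batch size of 32 are assumptions;
+ * app_handle_dma_error() is a hypothetical application handler):
+ *
+ *     enum rte_dma_status_code st[32];
+ *     uint16_t last_idx, i;
+ *     uint16_t n = rte_dma_completed_status(dev_id, vchan, 32, &last_idx, st);
+ *
+ *     for (i = 0; i < n; i++)
+ *         if (st[i] != RTE_DMA_STATUS_SUCCESSFUL)
+ *             app_handle_dma_error(st[i]);
+ */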
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000000..6947091924
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef RTE_DMADEV_CORE_H
+#define RTE_DMADEV_CORE_H
+
+/**
+ * @file
+ *
+ * DMA Device internal header.
+ *
+ * This header contains internal data types which are used by the dataplane
+ * inline functions.
+ *
+ * Applications should not use these functions directly.
+ */
+
+/** @internal Used to enqueue a copy operation. */
+typedef int (*rte_dma_copy_t)(void *dev_private, uint16_t vchan,
+			      rte_iova_t src, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to enqueue a scatter-gather list copy operation. */
+typedef int (*rte_dma_copy_sg_t)(void *dev_private, uint16_t vchan,
+				 const struct rte_dma_sge *src,
+				 const struct rte_dma_sge *dst,
+				 uint16_t nb_src, uint16_t nb_dst,
+				 uint64_t flags);
+
+/** @internal Used to enqueue a fill operation. */
+typedef int (*rte_dma_fill_t)(void *dev_private, uint16_t vchan,
+			      uint64_t pattern, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to trigger hardware to begin working. */
+typedef int (*rte_dma_submit_t)(void *dev_private, uint16_t vchan);
+
+/** @internal Used to return the number of successfully completed operations. */
+typedef uint16_t (*rte_dma_completed_t)(void *dev_private,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+
+/** @internal Used to return the number of completed operations. */
+typedef uint16_t (*rte_dma_completed_status_t)(void *dev_private,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+
+/**
+ * @internal
+ * Fast-path dmadev functions and related data are held in a flat array,
+ * one entry per dmadev.
+ *
+ * On 64-bit systems the contents of this structure occupy exactly two 64B
+ * cache lines. On 32-bit systems they fit into one 64B cache line.
+ *
+ * The 'dev_private' field was placed in the first cache line to optimize
+ * performance because PMD drivers mainly depend on this field.
+ */
+struct rte_dma_fp_object {
+	/** PMD-specific private data. The driver should copy
+	 * rte_dma_dev.dev_private to this field during initialization.
+	 */
+	void *dev_private;
+	rte_dma_copy_t             copy;
+	rte_dma_copy_sg_t          copy_sg;
+	rte_dma_fill_t             fill;
+	rte_dma_submit_t           submit;
+	rte_dma_completed_t        completed;
+	rte_dma_completed_status_t completed_status;
+	void *reserved_cl0;
+	/** Reserve space for future IO functions, while keeping data and
+	 * dev_ops pointers on the second cacheline.
+	 */
+	void *reserved_cl1[6];
+} __rte_cache_aligned;
+
+extern struct rte_dma_fp_object *rte_dma_fp_objs;
+
+#endif /* RTE_DMADEV_CORE_H */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index 5fcf0f60b8..d6d2161306 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -100,6 +100,8 @@ struct rte_dma_dev {
 	void *dev_private; /**< PMD-specific private data. */
 	/** Device info which supplied during device initialization. */
 	struct rte_device *device;
+	/** Fast-path functions and related data. */
+	struct rte_dma_fp_object *fp_obj;
 	/** Functions implemented by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
 	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index e925dfcd6d..e17207b212 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -2,10 +2,15 @@ EXPERIMENTAL {
 	global:
 
 	rte_dma_close;
+	rte_dma_completed;
+	rte_dma_completed_status;
 	rte_dma_configure;
+	rte_dma_copy;
+	rte_dma_copy_sg;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
 	rte_dma_dump;
+	rte_dma_fill;
 	rte_dma_get_dev_id_by_name;
 	rte_dma_info_get;
 	rte_dma_is_valid;
@@ -13,6 +18,7 @@ EXPERIMENTAL {
 	rte_dma_stats_get;
 	rte_dma_stats_reset;
 	rte_dma_stop;
+	rte_dma_submit;
 	rte_dma_vchan_setup;
 
 	local: *;
@@ -22,6 +28,7 @@ INTERNAL {
 	global:
 
 	rte_dma_devices;
+	rte_dma_fp_objs;
 	rte_dma_pmd_allocate;
 	rte_dma_pmd_release;
 
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v25 4/6] dmadev: add multi-process support
  2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 3/6] dmadev: add data " Chengwen Feng
@ 2021-10-11  7:33   ` Chengwen Feng
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 6/6] app/test: add dmadev API test Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-11  7:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds multi-process support for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 181 ++++++++++++++++++++-----
 lib/dmadev/rte_dmadev_core.h           |   2 +-
 lib/dmadev/rte_dmadev_pmd.h            |  29 +++-
 4 files changed, 168 insertions(+), 45 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d1d7abf694..af32fce1ed 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -145,6 +145,7 @@ New Features
 
   * Device allocation functions.
   * Control and data plane API.
+  * Multi-process support.
 
 
 Removed Items
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 4080ba63bd..2273a692b8 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -19,6 +19,13 @@ static int16_t dma_devices_max;
 
 struct rte_dma_fp_object *rte_dma_fp_objs;
 struct rte_dma_dev *rte_dma_devices;
+static struct {
+	/* Holds the dev_max information of the primary process. This field is
+	 * set by the primary process and is read by the secondary process.
+	 */
+	int16_t dev_max;
+	struct rte_dma_dev_data data[0];
+} *dma_devices_shared_data;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
 #define RTE_DMA_LOG(level, ...) \
@@ -70,11 +77,11 @@ dma_find_free_id(void)
 {
 	int16_t i;
 
-	if (rte_dma_devices == NULL)
+	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
 		return -1;
 
 	for (i = 0; i < dma_devices_max; i++) {
-		if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
+		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
 			return i;
 	}
 
@@ -91,7 +98,7 @@ dma_find_by_name(const char *name)
 
 	for (i = 0; i < dma_devices_max; i++) {
 		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
-		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
 			return &rte_dma_devices[i];
 	}
 
@@ -147,23 +154,71 @@ dma_dev_data_prepare(void)
 	return 0;
 }
 
+static int
+dma_shared_data_prepare(void)
+{
+	const char *mz_name = "rte_dma_dev_data";
+	const struct rte_memzone *mz;
+	size_t size;
+
+	if (dma_devices_shared_data != NULL)
+		return 0;
+
+	size = sizeof(*dma_devices_shared_data) +
+		sizeof(struct rte_dma_dev_data) * dma_devices_max;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
+	else
+		mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL)
+		return -ENOMEM;
+
+	dma_devices_shared_data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dma_devices_shared_data, 0, size);
+		dma_devices_shared_data->dev_max = dma_devices_max;
+	} else {
+		dma_devices_max = dma_devices_shared_data->dev_max;
+	}
+
+	return 0;
+}
+
 static int
 dma_data_prepare(void)
 {
 	int ret;
 
-	if (dma_devices_max == 0)
-		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
-
-	ret = dma_fp_data_prepare();
-	if (ret)
-		return ret;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (dma_devices_max == 0)
+			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+	} else {
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+	}
 
-	return dma_dev_data_prepare();
+	return 0;
 }
 
 static struct rte_dma_dev *
-dma_allocate(const char *name, int numa_node, size_t private_data_size)
+dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
 {
 	struct rte_dma_dev *dev;
 	void *dev_private;
@@ -197,12 +252,59 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 	}
 
 	dev = &rte_dma_devices[dev_id];
-	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
-	dev->dev_id = dev_id;
-	dev->numa_node = numa_node;
-	dev->dev_private = dev_private;
-	dev->fp_obj = &rte_dma_fp_objs[dev_id];
-	dma_fp_object_dummy(dev->fp_obj);
+	dev->data = &dma_devices_shared_data->data[dev_id];
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+	dev->data->dev_id = dev_id;
+	dev->data->numa_node = numa_node;
+	dev->data->dev_private = dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_attach_secondary(const char *name)
+{
+	struct rte_dma_dev *dev;
+	int16_t i;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == dma_devices_max) {
+		RTE_DMA_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[i];
+	dev->data = &dma_devices_shared_data->data[i];
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dma_allocate_primary(name, numa_node, private_data_size);
+	else
+		dev = dma_attach_secondary(name);
+
+	if (dev) {
+		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
+		dma_fp_object_dummy(dev->fp_obj);
+	}
 
 	return dev;
 }
@@ -210,7 +312,11 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 static void
 dma_release(struct rte_dma_dev *dev)
 {
-	rte_free(dev->dev_private);
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rte_free(dev->data->dev_private);
+		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
+	}
+
 	dma_fp_object_dummy(dev->fp_obj);
 	memset(dev, 0, sizeof(struct rte_dma_dev));
 }
@@ -245,7 +351,7 @@ rte_dma_pmd_release(const char *name)
 		return -EINVAL;
 
 	if (dev->state == RTE_DMA_DEV_READY)
-		return rte_dma_close(dev->dev_id);
+		return rte_dma_close(dev->data->dev_id);
 
 	dma_release(dev);
 	return 0;
@@ -263,7 +369,7 @@ rte_dma_get_dev_id_by_name(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
-	return dev->dev_id;
+	return dev->data->dev_id;
 }
 
 bool
@@ -308,7 +414,7 @@ rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
 		return ret;
 
 	dev_info->numa_node = dev->device->numa_node;
-	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
 
 	return 0;
 }
@@ -323,7 +429,7 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -355,7 +461,8 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
 					     sizeof(struct rte_dma_conf));
 	if (ret == 0)
-		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+		memcpy(&dev->data->dev_conf, dev_conf,
+		       sizeof(struct rte_dma_conf));
 
 	return ret;
 }
@@ -369,12 +476,12 @@ rte_dma_start(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
 		return 0;
 	}
@@ -387,7 +494,7 @@ rte_dma_start(int16_t dev_id)
 		return ret;
 
 mark_started:
-	dev->dev_started = 1;
+	dev->data->dev_started = 1;
 	return 0;
 }
 
@@ -400,7 +507,7 @@ rte_dma_stop(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_started == 0) {
+	if (dev->data->dev_started == 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
 		return 0;
 	}
@@ -413,7 +520,7 @@ rte_dma_stop(int16_t dev_id)
 		return ret;
 
 mark_stopped:
-	dev->dev_started = 0;
+	dev->data->dev_started = 0;
 	return 0;
 }
 
@@ -427,7 +534,7 @@ rte_dma_close(int16_t dev_id)
 		return -EINVAL;
 
 	/* Device must be stopped before it can be closed */
-	if (dev->dev_started == 1) {
+	if (dev->data->dev_started == 1) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped before closing", dev_id);
 		return -EBUSY;
@@ -453,7 +560,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 	if (!rte_dma_is_valid(dev_id) || conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -465,7 +572,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
 		return -EINVAL;
 	}
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
@@ -539,7 +646,7 @@ rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
 	if (!rte_dma_is_valid(dev_id) || stats == NULL)
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -560,7 +667,7 @@ rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -633,14 +740,14 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 	}
 
 	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
-		dev->dev_id,
-		dev->dev_name,
-		dev->dev_started ? "started" : "stopped");
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
 	dma_dump_capability(f, dev_info.dev_capa);
 	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
 	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
 	(void)fprintf(f, "  silent_mode: %s\n",
-		dev->dev_conf.enable_silent ? "on" : "off");
+		dev->data->dev_conf.enable_silent ? "on" : "off");
 
 	if (dev->dev_ops->dev_dump != NULL)
 		return (*dev->dev_ops->dev_dump)(dev, f);
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index 6947091924..ad4035e6dc 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -60,7 +60,7 @@ typedef uint16_t (*rte_dma_completed_status_t)(void *dev_private,
  */
 struct rte_dma_fp_object {
 	/** PMD-specific private data. The driver should copy
-	 * rte_dma_dev.dev_private to this field during initialization.
+	 * rte_dma_dev.data->dev_private to this field during initialization.
 	 */
 	void *dev_private;
 	rte_dma_copy_t             copy;
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index d6d2161306..23b07a4e1c 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -76,6 +76,27 @@ struct rte_dma_dev_ops {
 
 	rte_dma_dump_t             dev_dump;
 };
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ *
+ * @see struct rte_dma_dev::data
+ */
+struct rte_dma_dev_data {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	void *dev_private; /**< PMD-specific private data. */
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
+	__extension__
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
 /**
  * Possible states of a DMA device.
  *
@@ -94,20 +115,14 @@ enum rte_dma_dev_state {
  * The generic data structure associated with each DMA device.
  */
 struct rte_dma_dev {
-	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
-	int16_t dev_id; /**< Device [external] identifier. */
-	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
-	void *dev_private; /**< PMD-specific private data. */
 	/** Device info which supplied during device initialization. */
 	struct rte_device *device;
+	struct rte_dma_dev_data *data; /**< Pointer to shared device data. */
 	/**< Fast-path functions and related data. */
 	struct rte_dma_fp_object *fp_obj;
 	/** Functions implemented by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
-	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
-	__extension__
-	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v25 5/6] dma/skeleton: introduce skeleton dmadev driver
  2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 4/6] dmadev: add multi-process support Chengwen Feng
@ 2021-10-11  7:33   ` Chengwen Feng
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 6/6] app/test: add dmadev API test Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-11  7:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The skeleton dmadev driver, along the lines of the rawdev skeleton, is
for showcasing the dmadev library.

The skeleton's design involves a virtual device which is plugged into the
VDEV bus on initialization.

Also, enable compilation of the dmadev skeleton driver.
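
A rough usage sketch (not part of this patch): an application can create
the skeleton instance from code instead of the EAL command line. The
"lcore=1" devargs value below is an arbitrary choice of the optional
copy-thread core:

    #include <stdio.h>
    #include <rte_bus_vdev.h>
    #include <rte_dmadev.h>

    static int
    skeldma_instantiate(void)
    {
    	/* Equivalent to the EAL option --vdev=dma_skeleton,lcore=1;
    	 * assumes rte_eal_init() has already been called.
    	 */
    	if (rte_vdev_init("dma_skeleton", "lcore=1") < 0) {
    		printf("cannot create dma_skeleton vdev\n");
    		return -1;
    	}
    	/* Return the dmadev id the skeleton registered under. */
    	return rte_dma_get_dev_id_by_name("dma_skeleton");
    }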

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   1 +
 drivers/dma/meson.build                |   4 +-
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 571 +++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  61 +++
 drivers/dma/skeleton/version.map       |   3 +
 6 files changed, 646 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 119cfaa04e..ec887ac49f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -457,6 +457,7 @@ F: doc/guides/regexdevs/features/default.ini
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: drivers/dma/skeleton/
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
index a24c56d8ff..d9c7ede32f 100644
--- a/drivers/dma/meson.build
+++ b/drivers/dma/meson.build
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright 2021 HiSilicon Limited
 
-drivers = []
+drivers = [
+        'skeleton',
+]
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000000..8871b80956
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000000..22a73c6178
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances, currently only 1 is supported. */
+static uint16_t skeldma_count;
+
+static int
+skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	32
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+			     RTE_DMA_CAPA_SVA |
+			     RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
+		  uint32_t conf_sz)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	RTE_SET_USED(conf_sz);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dma_dev *dev = param;
+	struct skeldma_hw *hw = dev->data->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
+			hw->zero_req_count++;
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not setup, start fail!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, including:
+	 * 1) flush the pending/running/completed rings to the empty ring.
+	 * 2) init ring idx to zero.
+	 * 3) init running statistics.
+	 * 4) mark cpucopy task exit_flag to false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Start cpucopy thread fail!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Set thread affinity lcore = %d fail!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skeleton_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a full ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dma_dev *dev)
+{
+	/* The device is already stopped. */
+	vchan_release(dev->data->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf,
+		    uint32_t conf_sz)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(conf_sz);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of desc must be power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
+		  struct rte_dma_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	(void)fprintf(f,
+		"    lcore_id: %d\n"
+		"    socket_id: %d\n"
+		"    desc_empty_ring_count: %u\n"
+		"    desc_pending_ring_count: %u\n"
+		"    desc_running_ring_count: %u\n"
+		"    desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	(void)fprintf(f,
+		"    next_ring_idx: %u\n"
+		"    submitted_count: %" PRIu64 "\n"
+		"    completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(void *dev_private, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(flags);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(void *dev_private, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(void *dev_private,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(void *dev_private,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dma_dev_ops skeldma_ops = {
+	.dev_info_get     = skeldma_info_get,
+	.dev_configure    = skeldma_configure,
+	.dev_start        = skeldma_start,
+	.dev_stop         = skeldma_stop,
+	.dev_close        = skeldma_close,
+
+	.vchan_setup      = skeldma_vchan_setup,
+
+	.stats_get        = skeldma_stats_get,
+	.stats_reset      = skeldma_stats_reset,
+
+	.dev_dump         = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dma_dev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	dev->device = &vdev->device;
+	dev->dev_ops = &skeldma_ops;
+	dev->fp_obj->dev_private = dev->data->dev_private;
+	dev->fp_obj->copy = skeldma_copy;
+	dev->fp_obj->submit = skeldma_submit;
+	dev->fp_obj->completed = skeldma_completed;
+	dev->fp_obj->completed_status = skeldma_completed_status;
+
+	hw = dev->data->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	dev->state = RTE_DMA_DEV_READY;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	return rte_dma_pmd_release(name);
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multiple process not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_count > 0) {
+		SKELDMA_LOG(ERR, "Multiple instance not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		skeldma_count = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_count = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000000..eaa52364bf
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef SKELETON_DMADEV_H
+#define SKELETON_DMADEV_H
+
+#include <pthread.h>
+
+#include <rte_ring.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* SKELETON_DMADEV_H */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000000..c2e0723b4c
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v25 6/6] app/test: add dmadev API test
  2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-10-11  7:33   ` Chengwen Feng
  5 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-11  7:33 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the test
framework.
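
For example, from the interactive dpdk-test shell (binary name and prompt
as in the standard test application):

    RTE>>dmadev_autotest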

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                |   1 +
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  41 +++
 app/test/test_dmadev_api.c | 574 +++++++++++++++++++++++++++++++++++++
 app/test/test_dmadev_api.h |   5 +
 5 files changed, 625 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 app/test/test_dmadev_api.h

diff --git a/MAINTAINERS b/MAINTAINERS
index ec887ac49f..d329873465 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -458,6 +458,7 @@ DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
 F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/app/test/meson.build b/app/test/meson.build
index f144d8b8ed..a16374b7a1 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -44,6 +44,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -163,6 +165,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -334,6 +337,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000000..45da6b76fe
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+#include "test_dmadev_api.h"
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dma_get_dev_id_by_name(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dma_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dma(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000000..4a181af90a
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dma_api(uint16_t dev_id);
+
+#define DMA_TEST_API_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static int16_t test_dev_id;
+static int16_t invalid_dev_id;
+
+static char *src;
+static char *dst;
+
+static int total;
+static int passed;
+static int failed;
+
+static int
+testsuite_setup(int16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = -1;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL) {
+		rte_free(src);
+		src = NULL;
+		return -ENOMEM;
+	}
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	/* Set dmadev log level to critical to suppress unnecessary output
+	 * during API tests.
+	 */
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	src = NULL;
+	rte_free(dst);
+	dst = NULL;
+	/* Ensure the dmadev is stopped. */
+	rte_dma_stop(test_dev_id);
+
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dma_get_dev_id_by_name(void)
+{
+	int ret = rte_dma_get_dev_id_by_name("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dma_is_valid(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dma_is_valid(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_count(void)
+{
+	uint16_t count = rte_dma_count_avail();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_info_get(void)
+{
+	struct rte_dma_info info =  { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_configure(void)
+{
+	struct rte_dma_conf conf = { 0 };
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configured nb_vchans does not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+check_direction(void)
+{
+	struct rte_dma_vchan_conf vchan_conf;
+	int ret;
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return 0;
+}
+
+static int
+check_port_type(struct rte_dma_info *dev_info)
+{
+	struct rte_dma_vchan_conf vchan_conf;
+	int ret;
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info->min_desc;
+	vchan_conf.src_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info->min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return 0;
+}
+
+static int
+test_dma_vchan_setup(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configure success */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_vchan_setup(test_dev_id, dev_conf.nb_vchans, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	ret = check_direction();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check direction");
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check port type */
+	ret = check_port_type(&dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check port type");
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_start_stop(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup when device started */
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret);
+
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_stats(void)
+{
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dma_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, RTE_DMA_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, RTE_DMA_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+	ret = rte_dma_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Excepted -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static void
+setup_memory(void)
+{
+	int i;
+
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+}
+
+static int
+verify_memory(void)
+{
+	int i;
+
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] == dst[i])
+			continue;
+		RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+			"Failed to copy memory, %d %d", src[i], dst[i]);
+	}
+
+	return 0;
+}
+
+static int
+test_dma_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	setup_memory();
+
+	/* Check enqueue without submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+	/* Check add submit */
+	ret = rte_dma_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	ret = verify_memory();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	setup_memory();
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	ret = verify_memory();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	/* Stop the dmadev to return it to a known state. */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Wrong completed status, %d", status[i]);
+
+	/* Check do completed status again */
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed status");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Wrong completed status, %d", status[i]);
+
+	/* Stop the dmadev to return it to a known state. */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dma_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup fail!\n");
+		return -1;
+	}
+
+	/* If a testcase exits successfully, it must ensure that the test dmadev
+	 * still exists and is left in the stopped state.
+	 */
+	DMA_TEST_API_RUN(test_dma_get_dev_id_by_name);
+	DMA_TEST_API_RUN(test_dma_is_valid_dev);
+	DMA_TEST_API_RUN(test_dma_count);
+	DMA_TEST_API_RUN(test_dma_info_get);
+	DMA_TEST_API_RUN(test_dma_configure);
+	DMA_TEST_API_RUN(test_dma_vchan_setup);
+	DMA_TEST_API_RUN(test_dma_start_stop);
+	DMA_TEST_API_RUN(test_dma_stats);
+	DMA_TEST_API_RUN(test_dma_dump);
+	DMA_TEST_API_RUN(test_dma_completed);
+	DMA_TEST_API_RUN(test_dma_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
diff --git a/app/test/test_dmadev_api.h b/app/test/test_dmadev_api.h
new file mode 100644
index 0000000000..33fbc5bd41
--- /dev/null
+++ b/app/test/test_dmadev_api.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+int test_dma_api(uint16_t dev_id);
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v24 3/6] dmadev: add data plane API support
  2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 3/6] dmadev: add data " Chengwen Feng
  2021-10-09 10:03     ` fengchengwen
@ 2021-10-11 10:40     ` Bruce Richardson
  2021-10-11 12:31       ` fengchengwen
  1 sibling, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-10-11 10:40 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

On Sat, Oct 09, 2021 at 05:33:37PM +0800, Chengwen Feng wrote:
> This patch add data plane API for dmadev.
>

A few initial comments inline. I'll work on rebasing my follow-up patchset
to this, and let you know if I have any more feedback based on that.

/Bruce
 
> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> index a6a5680d2b..891ceeb988 100644
> --- a/lib/dmadev/rte_dmadev.c
> +++ b/lib/dmadev/rte_dmadev.c
> @@ -17,6 +17,7 @@
>  
>  static int16_t dma_devices_max;
>  
> +struct rte_dma_fp_object *rte_dma_fp_objs;

While I think I like this approach of making more of the dmadev hidden, I
think we need a better name for this. Although there is the dev_private
pointer in it, the struct is pretty much just the datapath functions, so how
about "rte_dma_funcs" as a name?

>  struct rte_dma_dev *rte_dma_devices;
>  

<snip>

> +/**
> + * @internal
> + * Fast-path dmadev functions and related data are hold in a flat array.
> + * One entry per dmadev.
> + *
> + * On 64-bit systems contents of this structure occupy exactly two 64B lines.
> + * On 32-bit systems contents of this structure fits into one 64B line.
> + *
> + * The 'dev_private' field was placed in the first cache line to optimize
> + * performance because the PMD driver mainly depends on this field.
> + */
> +struct rte_dma_fp_object {
> +	void *dev_private; /**< PMD-specific private data. */
> +	rte_dma_copy_t             copy;
> +	rte_dma_copy_sg_t          copy_sg;
> +	rte_dma_fill_t             fill;
> +	rte_dma_submit_t           submit;
> +	rte_dma_completed_t        completed;
> +	rte_dma_completed_status_t completed_status;
> +	void *reserved_cl0;
> +	/** Reserve space for future IO functions, while keeping data and
> +	 * dev_ops pointers on the second cacheline.
> +	 */
This comment is out of date.

> +	void *reserved_cl1[6];
> +} __rte_cache_aligned;

Small suggestion: since there is no data at the end of the structure,
rather than adding in padding arrays which need to be adjusted as we add
fields into the struct, let's just change the "__rte_cache_aligned" macro
to "__rte_aligned(128)". This will explicitly set the size to 128-bytes and
allow us to remove the reserved fields - making it easier to add new
pointers.

> +
> +extern struct rte_dma_fp_object *rte_dma_fp_objs;
> +
> +#endif /* RTE_DMADEV_CORE_H */

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v24 3/6] dmadev: add data plane API support
  2021-10-11 10:40     ` Bruce Richardson
@ 2021-10-11 12:31       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-10-11 12:31 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

On 2021/10/11 18:40, Bruce Richardson wrote:
> On Sat, Oct 09, 2021 at 05:33:37PM +0800, Chengwen Feng wrote:
>> This patch add data plane API for dmadev.
>>
> 
> A few initial comments inline. I'll work on rebasing my follow-up patchset
> to this, and let you know if I have any more feedback based on that.
> 
> /Bruce
>  
>> diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
>> index a6a5680d2b..891ceeb988 100644
>> --- a/lib/dmadev/rte_dmadev.c
>> +++ b/lib/dmadev/rte_dmadev.c
>> @@ -17,6 +17,7 @@
>>  
>>  static int16_t dma_devices_max;
>>  
>> +struct rte_dma_fp_object *rte_dma_fp_objs;
> 
> While I think I like this approach of making more of the dmadev hidden, I
> think we need a better name for this. While there is the dev_private
> pointer in it, the struct is pretty much the datapath functions, so how
> about "rte_dma_funcs" as a name?

Hmm, I notice ethdev and eventdev both use rte_xxx_fp_ops, but this structure
has other fields (e.g. data pointers) in addition to ops, so the 'ops' suffix
is inappropriate. That is why I used 'object', which is widely used in
object-oriented programming.

It would be better to use uniform naming across ethdev/eventdev/dmadev and so
on; I would be happy to hear more opinions.

> 
>>  struct rte_dma_dev *rte_dma_devices;
>>  
> 
> <snip>
> 
>> +/**
>> + * @internal
>> + * Fast-path dmadev functions and related data are hold in a flat array.
>> + * One entry per dmadev.
>> + *
>> + * On 64-bit systems contents of this structure occupy exactly two 64B lines.
>> + * On 32-bit systems contents of this structure fits into one 64B line.
>> + *
>> + * The 'dev_private' field was placed in the first cache line to optimize
>> + * performance because the PMD driver mainly depends on this field.
>> + */
>> +struct rte_dma_fp_object {
>> +	void *dev_private; /**< PMD-specific private data. */
>> +	rte_dma_copy_t             copy;
>> +	rte_dma_copy_sg_t          copy_sg;
>> +	rte_dma_fill_t             fill;
>> +	rte_dma_submit_t           submit;
>> +	rte_dma_completed_t        completed;
>> +	rte_dma_completed_status_t completed_status;
>> +	void *reserved_cl0;
>> +	/** Reserve space for future IO functions, while keeping data and
>> +	 * dev_ops pointers on the second cacheline.
>> +	 */
> This comment is out of date.
> 
>> +	void *reserved_cl1[6];
>> +} __rte_cache_aligned;
> 
> Small suggestion: since there is no data at the end of the structure,
> rather than adding in padding arrays which need to be adjusted as we add
> fields into the struct, let's just change the "__rte_cache_aligned" macro
> to "__rte_aligned(128)". This will explicitly set the size to 128-bytes and
> allow us to remove the reserved fields - making it easier to add new
> pointers.

Agree

> 
>> +
>> +extern struct rte_dma_fp_object *rte_dma_fp_objs;
>> +
>> +#endif /* RTE_DMADEV_CORE_H */
> 
> .
> 

Thanks


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support Chengwen Feng
@ 2021-10-11 15:44     ` Bruce Richardson
  2021-10-12  3:57       ` fengchengwen
  2021-10-12 18:57     ` Thomas Monjalon
  1 sibling, 1 reply; 339+ messages in thread
From: Bruce Richardson @ 2021-10-11 15:44 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

On Mon, Oct 11, 2021 at 03:33:44PM +0800, Chengwen Feng wrote:
> This patch add control plane API for dmadev.
> 
> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
> Acked-by: Morten Brørup <mb@smartsharesystems.com>
> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
> ---
>  doc/guides/prog_guide/dmadev.rst       |  38 ++
>  doc/guides/rel_notes/release_21_11.rst |   1 +
>  lib/dmadev/rte_dmadev.c                | 360 +++++++++++++++++++
>  lib/dmadev/rte_dmadev.h                | 464 +++++++++++++++++++++++++
>  lib/dmadev/rte_dmadev_pmd.h            |  61 ++++
>  lib/dmadev/version.map                 |   9 +
>  6 files changed, 933 insertions(+)
> 

<snip>

> +/**
> + * A structure used to retrieve the information of a DMA device.
> + *
> + * @see rte_dma_info_get
> + */
> +struct rte_dma_info {
> +	/** Device capabilities (RTE_DMA_CAPA_*). */
> +	uint64_t dev_capa;
> +	/** Maximum number of virtual DMA channels supported. */
> +	uint16_t max_vchans;
> +	/** Maximum allowed number of virtual DMA channel descriptors. */
> +	uint16_t max_desc;
> +	/** Minimum allowed number of virtual DMA channel descriptors. */
> +	uint16_t min_desc;
> +	/** Maximum number of source or destination scatter-gather entry
> +	 * supported.
> +	 * If the device does not support COPY_SG capability, this value can be
> +	 * zero.
> +	 * If the device supports COPY_SG capability, then rte_dma_copy_sg()
> +	 * parameter nb_src/nb_dst should not exceed this value.
> +	 */
> +	uint16_t max_sges;
> +	/** NUMA node connection, -1 if unknown. */
> +	int16_t numa_node;
> +	/** Number of virtual DMA channel configured. */
> +	uint16_t nb_vchans;
> +};
> +
Since we really don't want apps to have to access the rte_dma_devices
array, I think we should add "const char *name" to the info struct. Apps may
find it useful for debugging or logging, and certainly the unit tests will
use it.
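
A sketch of what that could look like (the dev_name field and its placement
are hypothetical here; the other fields are from the quoted patch):

    struct rte_dma_info {
    	const char *dev_name; /**< Device name, for logging/debug. */
    	uint64_t dev_capa;    /**< Device capabilities (RTE_DMA_CAPA_*). */
    	uint16_t max_vchans;
    	uint16_t max_desc;
    	uint16_t min_desc;
    	uint16_t max_sges;
    	int16_t numa_node;
    	uint16_t nb_vchans;
    };

    /* An app could then log a device without touching rte_dma_devices
     * (assumes the modified rte_dmadev.h and <stdio.h> are included):
     */
    static void
    log_dma_dev(int16_t dev_id)
    {
    	struct rte_dma_info info;

    	if (rte_dma_info_get(dev_id, &info) == 0)
    		printf("dmadev %d: %s\n", dev_id, info.dev_name);
    }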

/Bruce

^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support
  2021-10-11 15:44     ` Bruce Richardson
@ 2021-10-12  3:57       ` fengchengwen
  0 siblings, 0 replies; 339+ messages in thread
From: fengchengwen @ 2021-10-12  3:57 UTC (permalink / raw)
  To: Bruce Richardson
  Cc: thomas, ferruh.yigit, jerinj, jerinjacobk, andrew.rybchenko, dev,
	mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

On 2021/10/11 23:44, Bruce Richardson wrote:
> On Mon, Oct 11, 2021 at 03:33:44PM +0800, Chengwen Feng wrote:
>> This patch add control plane API for dmadev.
>>
>> Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
>> Acked-by: Bruce Richardson <bruce.richardson@intel.com>
>> Acked-by: Morten Brørup <mb@smartsharesystems.com>
>> Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
>> Reviewed-by: Conor Walsh <conor.walsh@intel.com>
>> ---
>>  doc/guides/prog_guide/dmadev.rst       |  38 ++
>>  doc/guides/rel_notes/release_21_11.rst |   1 +
>>  lib/dmadev/rte_dmadev.c                | 360 +++++++++++++++++++
>>  lib/dmadev/rte_dmadev.h                | 464 +++++++++++++++++++++++++
>>  lib/dmadev/rte_dmadev_pmd.h            |  61 ++++
>>  lib/dmadev/version.map                 |   9 +
>>  6 files changed, 933 insertions(+)
>>
> 
> <snip>
> 
>> +/**
>> + * A structure used to retrieve the information of a DMA device.
>> + *
>> + * @see rte_dma_info_get
>> + */
>> +struct rte_dma_info {
>> +	/** Device capabilities (RTE_DMA_CAPA_*). */
>> +	uint64_t dev_capa;
>> +	/** Maximum number of virtual DMA channels supported. */
>> +	uint16_t max_vchans;
>> +	/** Maximum allowed number of virtual DMA channel descriptors. */
>> +	uint16_t max_desc;
>> +	/** Minimum allowed number of virtual DMA channel descriptors. */
>> +	uint16_t min_desc;
>> +	/** Maximum number of source or destination scatter-gather entries
>> +	 * supported.
>> +	 * If the device does not support COPY_SG capability, this value can be
>> +	 * zero.
>> +	 * If the device supports COPY_SG capability, then rte_dma_copy_sg()
>> +	 * parameter nb_src/nb_dst should not exceed this value.
>> +	 */
>> +	uint16_t max_sges;
>> +	/** NUMA node connection, -1 if unknown. */
>> +	int16_t numa_node;
>> +	/** Number of virtual DMA channel configured. */
>> +	uint16_t nb_vchans;
>> +};
>> +
> Since we really don't want apps to have to access the rte_dma_devices
> array, I think we should add "const char *name" to the info struct. Apps may
> find it useful for debugging or logging, and certainly the unit tests will
> use it.

Agree

> 
> /Bruce
> 
> 

Thanks


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support Chengwen Feng
  2021-10-11 15:44     ` Bruce Richardson
@ 2021-10-12 18:57     ` Thomas Monjalon
  1 sibling, 0 replies; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-12 18:57 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

11/10/2021 09:33, Chengwen Feng:
> +Device Configuration
> +~~~~~~~~~~~~~~~~~~~~
> +
> +The rte_dma_configure API is used to configure a DMA device.
> +
> +.. code-block:: c
> +
> +   int rte_dma_configure(int16_t dev_id,
> +                         const struct rte_dma_conf *dev_conf);
> +
> +The ``rte_dma_conf`` structure is used to pass the configuration parameters
> +for the DMA device.
> +
> +
> +Configuration of Virtual DMA Channels
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +
> +The rte_dma_vchan_setup API is used to configure a virtual DMA channel.
> +
> +.. code-block:: c
> +
> +   int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
> +                           const struct rte_dma_vchan_conf *conf);
> +
> +The ``rte_dma_vchan_conf`` structure is used to pass the configuration
> +parameters for the virtual DMA channel.

I think those two sections above don't bring anything to the guide.
Functions are described in Doxygen comments; that is enough.


> --- a/doc/guides/rel_notes/release_21_11.rst
> +++ b/doc/guides/rel_notes/release_21_11.rst
> @@ -144,6 +144,7 @@ New Features
>  * **Introduced dmadev library with:**
>  
>    * Device allocation functions.
> +  * Control plane API.

This is not a feature; you can drop it from the release notes.


[...]
> +/**@{@name DMA capability
> + * @see struct rte_dma_info::dev_capa
> + */

Thank you for using Doxygen grouping.

> +#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
> +/**< Support memory-to-memory transfer */

Would it be possible to put the comment before the flag?
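
That is, prefix style, e.g.:

	/** Support memory-to-memory transfer. */
	#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)

so each description reads just before the value it documents.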

> +#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
> +/**< Support memory-to-device transfer. */
> +#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
> +/**< Support device-to-memory transfer. */
> +#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
> +/**< Support device-to-device transfer. */
> +#define RTE_DMA_CAPA_SVA		RTE_BIT64(4)
> +/**< Support SVA which could use VA as DMA address.
> + * If the device supports SVA, the application could pass any VA address like
> + * memory from rte_malloc(), rte_memzone(), malloc, stack memory.
> + * If the device does not support SVA, the application should pass IOVA
> + * addresses obtained from rte_malloc(), rte_memzone().
> + */
> +#define RTE_DMA_CAPA_SILENT		RTE_BIT64(5)
> +/**< Support work in silent mode.
> + * In this mode, the application is not required to invoke the
> + * rte_dma_completed*() APIs.
> + * @see struct rte_dma_conf::silent_mode
> + */
> +#define RTE_DMA_CAPA_OPS_COPY		RTE_BIT64(32)
> +/**< Support copy operation.
> + * This capability starts at bit index 32, leaving a gap between the
> + * normal capabilities and the ops capabilities.
> + */
> +#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
> +/**< Support scatter-gather list copy operation. */
> +#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
> +/**< Support fill operation. */
> +/**@}*/




^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library
  2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library Chengwen Feng
@ 2021-10-12 19:09     ` Thomas Monjalon
  2021-10-13  0:21       ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-12 19:09 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

11/10/2021 09:33, Chengwen Feng:
> --- /dev/null
> +++ b/doc/guides/prog_guide/dmadev.rst
> @@ -0,0 +1,60 @@
> +.. SPDX-License-Identifier: BSD-3-Clause
> +   Copyright 2021 HiSilicon Limited
> +
> +DMA Device Library
> +==================
> +
> +The DMA library provides a DMA device framework for management and provisioning
> +of hardware and software DMA poll mode drivers, defining generic API which
> +support a number of different DMA operations.
> +
> +
> +Design Principles
> +-----------------
> +
> +The DMA framework provides a generic DMA device framework which supports both
> +physical (hardware) and virtual (software) DMA devices, as well as a generic DMA
> +API which allows DMA devices to be managed and configured, and supports DMA
> +operations to be provisioned on DMA poll mode driver.
> +
> +.. _figure_dmadev:
> +
> +.. figure:: img/dmadev.*
> +
> +The above figure shows the model on which the DMA framework is built on:
> +
> + * The DMA controller could have multiple hardware DMA channels (aka. hardware
> +   DMA queues), each hardware DMA channel should be represented by a dmadev.
> + * The dmadev could create multiple virtual DMA channels, each virtual DMA
> +   channel represents a different transfer context. The DMA operation request
> +   must be submitted to the virtual DMA channel. e.g. Application could create
> +   virtual DMA channel 0 for memory-to-memory transfer scenario, and create
> +   virtual DMA channel 1 for memory-to-device transfer scenario.

When updating the doc, we would like to change a minimum of lines,
so it's better to split lines logically: after a comma, a period,
or before the next part of the sentence.
Do not hesitate to make short lines if needed.
Such a change is quite fast to do, thanks.

[...]
> +* **Introduced dmadev library with:**
> +
> +  * Device allocation functions.

You can drop this line, it is not a feature.

[...]
> +static int
> +dma_dev_data_prepare(void)
> +{
> +	size_t size;
> +
> +	if (rte_dma_devices != NULL)
> +		return 0;
> +
> +	size = dma_devices_max * sizeof(struct rte_dma_dev);
> +	rte_dma_devices = malloc(size);
> +	if (rte_dma_devices == NULL)
> +		return -ENOMEM;
> +	memset(rte_dma_devices, 0, size);
> +
> +	return 0;
> +}
> +
> +static int
> +dma_data_prepare(void)
> +{
> +	if (dma_devices_max == 0)
> +		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
> +	return dma_dev_data_prepare();
> +}
> +
> +static struct rte_dma_dev *
> +dma_allocate(const char *name, int numa_node, size_t private_data_size)
> +{
> +	struct rte_dma_dev *dev;
> +	void *dev_private;
> +	int16_t dev_id;
> +	int ret;
> +
> +	ret = dma_data_prepare();
> +	if (ret < 0) {
> +		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
> +		return NULL;
> +	}
> +
> +	dev = dma_find_by_name(name);
> +	if (dev != NULL) {
> +		RTE_DMA_LOG(ERR, "DMA device already allocated");
> +		return NULL;
> +	}
> +
> +	dev_private = rte_zmalloc_socket(name, private_data_size,
> +					 RTE_CACHE_LINE_SIZE, numa_node);
> +	if (dev_private == NULL) {
> +		RTE_DMA_LOG(ERR, "Cannot allocate private data");
> +		return NULL;
> +	}
> +
> +	dev_id = dma_find_free_id();
> +	if (dev_id < 0) {
> +		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
> +		rte_free(dev_private);
> +		return NULL;
> +	}
> +
> +	dev = &rte_dma_devices[dev_id];
> +	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
> +	dev->dev_id = dev_id;
> +	dev->numa_node = numa_node;
> +	dev->dev_private = dev_private;
> +
> +	return dev;
> +}
> +
> +static void
> +dma_release(struct rte_dma_dev *dev)
> +{
> +	rte_free(dev->dev_private);
> +	memset(dev, 0, sizeof(struct rte_dma_dev));
> +}
> +
> +struct rte_dma_dev *
> +rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
> +{
> +	struct rte_dma_dev *dev;
> +
> +	if (dma_check_name(name) != 0 || private_data_size == 0)
> +		return NULL;
> +
> +	dev = dma_allocate(name, numa_node, private_data_size);
> +	if (dev == NULL)
> +		return NULL;
> +
> +	dev->state = RTE_DMA_DEV_REGISTERED;
> +
> +	return dev;
> +}
> +
> +int
> +rte_dma_pmd_release(const char *name)
> +{
> +	struct rte_dma_dev *dev;
> +
> +	if (dma_check_name(name) != 0)
> +		return -EINVAL;
> +
> +	dev = dma_find_by_name(name);
> +	if (dev == NULL)
> +		return -EINVAL;
> +
> +	dma_release(dev);
> +	return 0;
> +}

Trying to understand the logic of creation/destroy.
skeldma_probe
\-> skeldma_create
    \-> rte_dma_pmd_allocate
        \-> dma_allocate
            \-> dma_data_prepare
                \-> dma_dev_data_prepare
skeldma_remove
\-> skeldma_destroy
    \-> rte_dma_pmd_release
        \-> dma_release
app
\-> rte_dma_close
    \-> skeldma_close
    \-> dma_release

My only concern is that the PMD remove does not call rte_dma_close.
The PMD should check which dmadev is open for the rte_device to remove,
and close the dmadev first.
This way, no need for the function rte_dma_pmd_release,
and no need to duplicate the release process in two paths.
By the way, the function vchan_release is called only in the close function,
not in the "remove path".
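
A rough sketch of the idea, using the skeleton driver's names (simplified
and untested, assuming a vdev-based driver):

	static int
	skeldma_remove(struct rte_vdev_device *vdev)
	{
		const char *name = rte_vdev_device_name(vdev);
		int id = rte_dma_get_dev_id_by_name(name);

		if (id < 0)
			return -ENODEV;

		/* Close the open dmadev first: this releases vchans and
		 * private data in one path, instead of duplicating the
		 * release logic in rte_dma_pmd_release().
		 */
		return rte_dma_close(id);
	}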




^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library
  2021-10-12 19:09     ` Thomas Monjalon
@ 2021-10-13  0:21       ` fengchengwen
  2021-10-13  7:41         ` Thomas Monjalon
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-10-13  0:21 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

On 2021/10/13 3:09, Thomas Monjalon wrote:
> 11/10/2021 09:33, Chengwen Feng:
>> --- /dev/null
>> +++ b/doc/guides/prog_guide/dmadev.rst
>> @@ -0,0 +1,60 @@
>> +.. SPDX-License-Identifier: BSD-3-Clause
>> +   Copyright 2021 HiSilicon Limited
>> +
>> +DMA Device Library
>> +==================
>> +
>> +The DMA library provides a DMA device framework for management and provisioning
>> +of hardware and software DMA poll mode drivers, defining generic API which
>> +support a number of different DMA operations.
>> +
>> +
>> +Design Principles
>> +-----------------
>> +
>> +The DMA framework provides a generic DMA device framework which supports both
>> +physical (hardware) and virtual (software) DMA devices, as well as a generic DMA
>> +API which allows DMA devices to be managed and configured, and supports DMA
>> +operations to be provisioned on DMA poll mode driver.
>> +
>> +.. _figure_dmadev:
>> +
>> +.. figure:: img/dmadev.*
>> +
>> +The above figure shows the model on which the DMA framework is built on:
>> +
>> + * The DMA controller could have multiple hardware DMA channels (aka. hardware
>> +   DMA queues), each hardware DMA channel should be represented by a dmadev.
>> + * The dmadev could create multiple virtual DMA channels, each virtual DMA
>> +   channel represents a different transfer context. The DMA operation request
>> +   must be submitted to the virtual DMA channel. e.g. Application could create
>> +   virtual DMA channel 0 for memory-to-memory transfer scenario, and create
>> +   virtual DMA channel 1 for memory-to-device transfer scenario.
> 
> When updating the doc, we would like to change a minimum of lines,
> so it's better to split lines logically: after a comma, a period,
> or before the next part of the sentence.
> Do not hesitate to make short lines if needed.
> Such a change is quite fast to do, thanks.
> 
> [...]
>> +* **Introduced dmadev library with:**
>> +
>> +  * Device allocation functions.
> 
> You can drop this line, it is not a feature.

I'm going to try another description.

> 
> [...]
>> +static int
>> +dma_dev_data_prepare(void)
>> +{
>> +	size_t size;
>> +
>> +	if (rte_dma_devices != NULL)
>> +		return 0;
>> +
>> +	size = dma_devices_max * sizeof(struct rte_dma_dev);
>> +	rte_dma_devices = malloc(size);
>> +	if (rte_dma_devices == NULL)
>> +		return -ENOMEM;
>> +	memset(rte_dma_devices, 0, size);
>> +
>> +	return 0;
>> +}
>> +
>> +static int
>> +dma_data_prepare(void)
>> +{
>> +	if (dma_devices_max == 0)
>> +		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
>> +	return dma_dev_data_prepare();
>> +}
>> +
>> +static struct rte_dma_dev *
>> +dma_allocate(const char *name, int numa_node, size_t private_data_size)
>> +{
>> +	struct rte_dma_dev *dev;
>> +	void *dev_private;
>> +	int16_t dev_id;
>> +	int ret;
>> +
>> +	ret = dma_data_prepare();
>> +	if (ret < 0) {
>> +		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
>> +		return NULL;
>> +	}
>> +
>> +	dev = dma_find_by_name(name);
>> +	if (dev != NULL) {
>> +		RTE_DMA_LOG(ERR, "DMA device already allocated");
>> +		return NULL;
>> +	}
>> +
>> +	dev_private = rte_zmalloc_socket(name, private_data_size,
>> +					 RTE_CACHE_LINE_SIZE, numa_node);
>> +	if (dev_private == NULL) {
>> +		RTE_DMA_LOG(ERR, "Cannot allocate private data");
>> +		return NULL;
>> +	}
>> +
>> +	dev_id = dma_find_free_id();
>> +	if (dev_id < 0) {
>> +		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
>> +		rte_free(dev_private);
>> +		return NULL;
>> +	}
>> +
>> +	dev = &rte_dma_devices[dev_id];
>> +	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
>> +	dev->dev_id = dev_id;
>> +	dev->numa_node = numa_node;
>> +	dev->dev_private = dev_private;
>> +
>> +	return dev;
>> +}
>> +
>> +static void
>> +dma_release(struct rte_dma_dev *dev)
>> +{
>> +	rte_free(dev->dev_private);
>> +	memset(dev, 0, sizeof(struct rte_dma_dev));
>> +}
>> +
>> +struct rte_dma_dev *
>> +rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
>> +{
>> +	struct rte_dma_dev *dev;
>> +
>> +	if (dma_check_name(name) != 0 || private_data_size == 0)
>> +		return NULL;
>> +
>> +	dev = dma_allocate(name, numa_node, private_data_size);
>> +	if (dev == NULL)
>> +		return NULL;
>> +
>> +	dev->state = RTE_DMA_DEV_REGISTERED;
>> +
>> +	return dev;
>> +}
>> +
>> +int
>> +rte_dma_pmd_release(const char *name)
>> +{
>> +	struct rte_dma_dev *dev;
>> +
>> +	if (dma_check_name(name) != 0)
>> +		return -EINVAL;
>> +
>> +	dev = dma_find_by_name(name);
>> +	if (dev == NULL)
>> +		return -EINVAL;
>> +
>> +	dma_release(dev);
>> +	return 0;
>> +}
> 
> Trying to understand the logic of creation/destroy.
> skeldma_probe
> \-> skeldma_create
>     \-> rte_dma_pmd_allocate
>         \-> dma_allocate
>             \-> dma_data_prepare
>                 \-> dma_dev_data_prepare
> skeldma_remove
> \-> skeldma_destroy
>     \-> rte_dma_pmd_release
>         \-> dma_release

This patch only provides the device allocation functions; the 2nd patch provides the extra logic:

	diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
	index 42a4693bd9..a6a5680d2b 100644
	--- a/lib/dmadev/rte_dmadev.c
	+++ b/lib/dmadev/rte_dmadev.c
	@@ -201,6 +201,9 @@ rte_dma_pmd_release(const char *name)
	        if (dev == NULL)
        	        return -EINVAL;

	+       if (dev->state == RTE_DMA_DEV_READY)
	+               return rte_dma_close(dev->dev_id);
	+
        	dma_release(dev);
	        return 0;
	 }

So the skeldma remove will be:

 skeldma_remove
 \-> skeldma_destroy
     \-> rte_dma_pmd_release
         \-> rte_dma_close
             \-> dma_release

> app
> \-> rte_dma_close
>     \-> skeldma_close
>     \-> dma_release
> 
> My only concern is that the PMD remove does not call rte_dma_close.

If the device was created successfully, rte_dma_close will be called in the device remove phase.

> The PMD should check which dmadev is open for the rte_device to remove,
> and close the dmadev first.
> This way, no need for the function rte_dma_pmd_release,
> and no need to duplicate the release process in two paths.
> By the way, the function vchan_release is called only in the close function,
> not in the "remove path".
> 
> 
> 
> 
> 

Thanks


^ permalink raw reply	[flat|nested] 339+ messages in thread

* Re: [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library
  2021-10-13  0:21       ` fengchengwen
@ 2021-10-13  7:41         ` Thomas Monjalon
  2021-10-15  8:29           ` Thomas Monjalon
  0 siblings, 1 reply; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-13  7:41 UTC (permalink / raw)
  To: fengchengwen
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

13/10/2021 02:21, fengchengwen:
> On 2021/10/13 3:09, Thomas Monjalon wrote:
> > 11/10/2021 09:33, Chengwen Feng:
> >> +static void
> >> +dma_release(struct rte_dma_dev *dev)
> >> +{
> >> +	rte_free(dev->dev_private);
> >> +	memset(dev, 0, sizeof(struct rte_dma_dev));
> >> +}
[...]
> >> +int
> >> +rte_dma_pmd_release(const char *name)
> >> +{
> >> +	struct rte_dma_dev *dev;
> >> +
> >> +	if (dma_check_name(name) != 0)
> >> +		return -EINVAL;
> >> +
> >> +	dev = dma_find_by_name(name);
> >> +	if (dev == NULL)
> >> +		return -EINVAL;
> >> +
> >> +	dma_release(dev);
> >> +	return 0;
> >> +}
> > 
> > Trying to understand the logic of creation/destroy.
> > skeldma_probe
> > \-> skeldma_create
> >     \-> rte_dma_pmd_allocate
> >         \-> dma_allocate
> >             \-> dma_data_prepare
> >                 \-> dma_dev_data_prepare
> > skeldma_remove
> > \-> skeldma_destroy
> >     \-> rte_dma_pmd_release
> >         \-> dma_release
> 
> This patch only provides the device allocation functions; the 2nd patch provides the extra logic:
> 
> 	diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> 	index 42a4693bd9..a6a5680d2b 100644
> 	--- a/lib/dmadev/rte_dmadev.c
> 	+++ b/lib/dmadev/rte_dmadev.c
> 	@@ -201,6 +201,9 @@ rte_dma_pmd_release(const char *name)
> 	        if (dev == NULL)
>         	        return -EINVAL;
> 
> 	+       if (dev->state == RTE_DMA_DEV_READY)
> 	+               return rte_dma_close(dev->dev_id);
> 	+
>         	dma_release(dev);
> 	        return 0;
> 	 }
> 
> So the skeldma remove will be:
> 
>  skeldma_remove
>  \-> skeldma_destroy
>      \-> rte_dma_pmd_release
>          \-> rte_dma_close
>              \-> dma_release

OK, in this case, there is no need to call dma_release from rte_dma_pmd_release,
because it is already called from rte_dma_close.
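
i.e. roughly (a simplified sketch, assuming the dev_ops close hook added
in patch 2/6):

	int
	rte_dma_close(int16_t dev_id)
	{
		struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
		int ret;

		/* The driver tears down its own resources first... */
		ret = dev->dev_ops->dev_close(dev);
		/* ...then the generic layer frees the slot exactly once. */
		if (ret == 0)
			dma_release(dev);

		return ret;
	}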




^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v26 0/6] support dmadev
  2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
                   ` (28 preceding siblings ...)
  2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
@ 2021-10-13 12:24 ` Chengwen Feng
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 1/6] dmadev: introduce DMA device library Chengwen Feng
                     ` (6 more replies)
  29 siblings, 7 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-13 12:24 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch set contains six patches which add the new dmadev library.

Chengwen Feng (6):
  dmadev: introduce DMA device library
  dmadev: add control plane API support
  dmadev: add data plane API support
  dmadev: add multi-process support
  dma/skeleton: introduce skeleton dmadev driver
  app/test: add dmadev API test

---
v26:
* use prefix comment in rte_dmadev.h group macro.
* rte_dma_info add field dev_name.
* rte_dma_fp_object fix 128-byte alignment.
* delete some useless content of dma.rst.
v25:
* fix undefined reference to rte_dma_fp_objs with some compiler suite.
* make rte_dma_dev hold a pointer to rte_dma_fp_objs to avoid primary and
  secondary process inconsistency.
v24:
* use rte_dma_fp_object to hide implementation details.
* support group doxygen for RTE_DMA_CAPA_* and RTE_DMA_OP_*.
* adjusted the naming of some functions.
* fix typo.
v23:
* split multi-process support from 1st patch.
* fix some static check warning.
* fix skeleton cpu thread zero_req_count flip bug.
* add test_dmadev_api.h.
* add the description of the dmadev state change when init succeeds.
v22:
* function prefix change from rte_dmadev_* to rte_dma_*.
* change to prefix comment in most scenarios.
* dmadev dev_id use int16_t type.
* fix typo.
* organize patchsets in incremental mode.

 MAINTAINERS                            |    7 +
 app/test/meson.build                   |    4 +
 app/test/test_dmadev.c                 |   41 +
 app/test/test_dmadev_api.c             |  574 +++++++++++++
 app/test/test_dmadev_api.h             |    5 +
 doc/api/doxy-api-index.md              |    1 +
 doc/api/doxy-api.conf.in               |    1 +
 doc/guides/dmadevs/index.rst           |   12 +
 doc/guides/index.rst                   |    1 +
 doc/guides/prog_guide/dmadev.rst       |   90 ++
 doc/guides/prog_guide/img/dmadev.svg   |  283 +++++++
 doc/guides/prog_guide/index.rst        |    1 +
 doc/guides/rel_notes/release_21_11.rst |    8 +
 drivers/dma/meson.build                |    6 +
 drivers/dma/skeleton/meson.build       |    7 +
 drivers/dma/skeleton/skeleton_dmadev.c |  571 +++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |   61 ++
 drivers/dma/skeleton/version.map       |    3 +
 drivers/meson.build                    |    1 +
 lib/dmadev/meson.build                 |    7 +
 lib/dmadev/rte_dmadev.c                |  826 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 1049 ++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h           |   76 ++
 lib/dmadev/rte_dmadev_pmd.h            |  168 ++++
 lib/dmadev/version.map                 |   36 +
 lib/meson.build                        |    1 +
 26 files changed, 3840 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 app/test/test_dmadev_api.h
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_core.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v26 1/6] dmadev: introduce DMA device library
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
@ 2021-10-13 12:24   ` Chengwen Feng
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 2/6] dmadev: add control plane API support Chengwen Feng
                     ` (5 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-13 12:24 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The 'dmadev' is a generic type of DMA device.

This patch introduces the 'dmadev' device allocation functions.

The infrastructure is prepared to welcome drivers in drivers/dma/.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Acked-by: Jerin Jacob <jerinjacobk@gmail.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   5 +
 doc/api/doxy-api-index.md              |   1 +
 doc/api/doxy-api.conf.in               |   1 +
 doc/guides/dmadevs/index.rst           |  12 ++
 doc/guides/index.rst                   |   1 +
 doc/guides/prog_guide/dmadev.rst       |  58 +++++
 doc/guides/prog_guide/img/dmadev.svg   | 283 +++++++++++++++++++++++++
 doc/guides/prog_guide/index.rst        |   1 +
 doc/guides/rel_notes/release_21_11.rst |   5 +
 drivers/dma/meson.build                |   4 +
 drivers/meson.build                    |   1 +
 lib/dmadev/meson.build                 |   6 +
 lib/dmadev/rte_dmadev.c                | 246 +++++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 133 ++++++++++++
 lib/dmadev/rte_dmadev_pmd.h            |  90 ++++++++
 lib/dmadev/version.map                 |  20 ++
 lib/meson.build                        |   1 +
 17 files changed, 868 insertions(+)
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100644 doc/guides/prog_guide/dmadev.rst
 create mode 100644 doc/guides/prog_guide/img/dmadev.svg
 create mode 100644 drivers/dma/meson.build
 create mode 100644 lib/dmadev/meson.build
 create mode 100644 lib/dmadev/rte_dmadev.c
 create mode 100644 lib/dmadev/rte_dmadev.h
 create mode 100644 lib/dmadev/rte_dmadev_pmd.h
 create mode 100644 lib/dmadev/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 278e5b3226..119cfaa04e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -454,6 +454,11 @@ F: app/test-regex/
 F: doc/guides/prog_guide/regexdev.rst
 F: doc/guides/regexdevs/features/default.ini
 
+DMA device API - EXPERIMENTAL
+M: Chengwen Feng <fengchengwen@huawei.com>
+F: lib/dmadev/
+F: doc/guides/prog_guide/dmadev.rst
+
 Eventdev API
 M: Jerin Jacob <jerinj@marvell.com>
 T: git://dpdk.org/next/dpdk-next-eventdev
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index 1992107a03..2939050431 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -21,6 +21,7 @@ The public API headers are grouped by topics:
   [compressdev]        (@ref rte_compressdev.h),
   [compress]           (@ref rte_comp.h),
   [regexdev]           (@ref rte_regexdev.h),
+  [dmadev]             (@ref rte_dmadev.h),
   [eventdev]           (@ref rte_eventdev.h),
   [event_eth_rx_adapter]   (@ref rte_event_eth_rx_adapter.h),
   [event_eth_tx_adapter]   (@ref rte_event_eth_tx_adapter.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 325a0195c6..109ec1f682 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -35,6 +35,7 @@ INPUT                   = @TOPDIR@/doc/api/doxy-api-index.md \
                           @TOPDIR@/lib/compressdev \
                           @TOPDIR@/lib/cryptodev \
                           @TOPDIR@/lib/distributor \
+                          @TOPDIR@/lib/dmadev \
                           @TOPDIR@/lib/efd \
                           @TOPDIR@/lib/ethdev \
                           @TOPDIR@/lib/eventdev \
diff --git a/doc/guides/dmadevs/index.rst b/doc/guides/dmadevs/index.rst
new file mode 100644
index 0000000000..0bce29d766
--- /dev/null
+++ b/doc/guides/dmadevs/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Drivers
+==================
+
+The following is a list of DMA device drivers, which can be used from
+an application through the DMA API.
+
+.. toctree::
+   :maxdepth: 2
+   :numbered:
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 857f0363d3..919825992e 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -21,6 +21,7 @@ DPDK documentation
    compressdevs/index
    vdpadevs/index
    regexdevs/index
+   dmadevs/index
    eventdevs/index
    rawdevs/index
    mempool/index
diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
new file mode 100644
index 0000000000..46b85ce217
--- /dev/null
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -0,0 +1,58 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+   Copyright 2021 HiSilicon Limited
+
+DMA Device Library
+==================
+
+The DMA library provides a DMA device framework for management and provisioning
+of hardware and software DMA poll mode drivers, defining a generic API which
+supports a number of different DMA operations.
+
+
+Design Principles
+-----------------
+
+The DMA framework provides a generic DMA device abstraction which supports both
+physical (hardware) and virtual (software) DMA devices, as well as a generic DMA
+API which allows DMA devices to be managed and configured, and supports DMA
+operations to be provisioned on a DMA poll mode driver.
+
+.. _figure_dmadev:
+
+.. figure:: img/dmadev.*
+
+The above figure shows the model on which the DMA framework is built:
+
+ * The DMA controller could have multiple hardware DMA channels (aka hardware
+   DMA queues); each hardware DMA channel should be represented by a dmadev.
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+   channel represents a different transfer context.
+ * The DMA operation request must be submitted to the virtual DMA channel.
+
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical DMA controllers are discovered during the PCI probe/enumeration
+performed by the EAL at DPDK initialization, based on their PCI BDF (bus,
+device, function). Specific physical DMA controllers, like other physical
+devices in DPDK, can be listed using the EAL command line options.
+
+The dmadevs are dynamically allocated by using the function
+``rte_dma_pmd_allocate`` based on the number of hardware DMA channels.
+
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each DMA device, whether physical or virtual, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the DMA device in all functions
+  exported by the DMA API.
+
+- A device name used to designate the DMA device in console messages, for
+  administration or debugging purposes.
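
[Illustrative only, not part of the patch: a minimal use of the
identification APIs introduced below; "dma_skeleton" is a hypothetical
device name.

	int id = rte_dma_get_dev_id_by_name("dma_skeleton");

	if (id >= 0 && rte_dma_is_valid(id))
		printf("dmadev %d is one of %u available devices\n",
		       id, (unsigned)rte_dma_count_avail());
]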
diff --git a/doc/guides/prog_guide/img/dmadev.svg b/doc/guides/prog_guide/img/dmadev.svg
new file mode 100644
index 0000000000..157d7eb7dc
--- /dev/null
+++ b/doc/guides/prog_guide/img/dmadev.svg
@@ -0,0 +1,283 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2021 HiSilicon Limited -->
+
+<svg
+   width="128.64288mm"
+   height="95.477707mm"
+   viewBox="0 0 192.96433 143.21656"
+   version="1.1"
+   id="svg934"
+   inkscape:version="1.1 (c68e22c387, 2021-05-23)"
+   sodipodi:docname="dmadev.svg"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:svg="http://www.w3.org/2000/svg">
+  <sodipodi:namedview
+     id="namedview936"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageshadow="2"
+     inkscape:pageopacity="0.0"
+     inkscape:pagecheckerboard="0"
+     inkscape:document-units="mm"
+     showgrid="false"
+     fit-margin-top="0"
+     fit-margin-left="0"
+     fit-margin-right="0"
+     fit-margin-bottom="0"
+     inkscape:showpageshadow="false"
+     inkscape:zoom="1.332716"
+     inkscape:cx="335.03011"
+     inkscape:cy="143.69152"
+     inkscape:window-width="1920"
+     inkscape:window-height="976"
+     inkscape:window-x="-8"
+     inkscape:window-y="-8"
+     inkscape:window-maximized="1"
+     inkscape:current-layer="layer1"
+     scale-x="1.5"
+     units="mm" />
+  <defs
+     id="defs931">
+    <rect
+       x="342.43954"
+       y="106.56832"
+       width="58.257381"
+       height="137.82834"
+       id="rect17873" />
+  </defs>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1"
+     transform="translate(-0.13857517,-21.527306)">
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9"
+       width="50"
+       height="28"
+       x="0.13857517"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1"
+       transform="translate(-49.110795,15.205683)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1045">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1047">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5"
+       width="50"
+       height="28"
+       x="60.138577"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4"
+       transform="translate(10.512565,15.373298)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1049">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1051">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.296755"
+       id="rect31-9-5-3"
+       width="50"
+       height="28"
+       x="137.43863"
+       y="21.527306"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-8"
+       transform="translate(88.79231,15.373299)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1053">virtual DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1055">channel</tspan></text>
+    <text
+       xml:space="preserve"
+       transform="matrix(0.26458333,0,0,0.26458333,-0.04940429,21.408845)"
+       id="text17871"
+       style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;white-space:pre;shape-inside:url(#rect17873);fill:#000000;fill-opacity:1;stroke:none" />
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8"
+       width="38.34557"
+       height="19.729115"
+       x="36.138577"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3"
+       transform="translate(-13.394978,59.135217)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1057">dmadev</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0"
+       width="60.902534"
+       height="24.616455"
+       x="25.196909"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76"
+       transform="translate(-24.485484,90.97883)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1059">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1061">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-6"
+       width="60.902534"
+       height="24.616455"
+       x="132.20036"
+       y="98.47744"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-7"
+       transform="translate(82.950904,90.79085)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1063">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1065">channel</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.307089"
+       id="rect31-9-5-8-0-4"
+       width="60.902534"
+       height="24.616455"
+       x="76.810928"
+       y="140.12741"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-76-4"
+       transform="translate(27.032341,133.10574)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1067">hardware DMA </tspan><tspan
+         x="54.136707"
+         y="26.865018"
+         id="tspan1069">controller</tspan></text>
+    <rect
+       style="fill:#c9c9ff;fill-opacity:1;stroke-width:0.218145"
+       id="rect31-9-5-8-5"
+       width="38.34557"
+       height="19.729115"
+       x="143.43863"
+       y="64.827354"
+       ry="0" />
+    <text
+       xml:space="preserve"
+       style="font-style:normal;font-weight:normal;font-size:7.05556px;line-height:1.25;font-family:sans-serif;white-space:pre;inline-size:70.1114;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.264583"
+       x="54.136707"
+       y="18.045568"
+       id="text803-1-4-3-7"
+       transform="translate(94.92597,59.664385)"><tspan
+         x="54.136707"
+         y="18.045568"
+         id="tspan1071">dmadev</tspan></text>
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 74.476373,49.527306 62.82407,64.827354"
+       id="path45308"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 35.924309,49.527306 47.711612,64.827354"
+       id="path45310"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9"
+       inkscape:connection-end="#rect31-9-5-8" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="M 55.403414,84.556469 55.53332,98.47744"
+       id="path45312"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8"
+       inkscape:connection-end="#rect31-9-5-8-0" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.62241,84.556469 0.0155,13.920971"
+       id="path45320"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-5"
+       inkscape:connection-end="#rect31-9-5-8-0-6" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 146.28317,123.09389 -22.65252,17.03352"
+       id="path45586"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0-6"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 70.900938,123.09389 21.108496,17.03352"
+       id="path45588"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-8-0"
+       inkscape:connection-end="#rect31-9-5-8-0-4" />
+    <path
+       style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:0.264583px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+       d="m 162.50039,49.527306 0.0675,15.300048"
+       id="path45956"
+       inkscape:connector-type="polyline"
+       inkscape:connector-curvature="0"
+       inkscape:connection-start="#rect31-9-5-3"
+       inkscape:connection-end="#rect31-9-5-8-5" />
+  </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2dce507f46..89af28dacb 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -27,6 +27,7 @@ Programmer's Guide
     cryptodev_lib
     compressdev
     regexdev
+    dmadev
     rte_security
     rawdev
     link_bonding_poll_mode_drv_lib
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index d5c762df62..1c7c1d6eab 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -76,6 +76,11 @@ New Features
   * ``RTE_ETH_RX_METADATA_USER_MARK``
   * ``RTE_ETH_RX_METADATA_TUNNEL_ID``
 
+* **Added dmadev library.**
+
+  * Added a DMA device framework for management and provision of
+    hardware and software DMA devices.
+
 * **Updated af_packet ethdev driver.**
 
   * Default VLAN strip behavior was changed. VLAN tag won't be stripped
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
new file mode 100644
index 0000000000..a24c56d8ff
--- /dev/null
+++ b/drivers/dma/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2021 HiSilicon Limited
+
+drivers = []
diff --git a/drivers/meson.build b/drivers/meson.build
index 3d08540581..b7d680868a 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -18,6 +18,7 @@ subdirs = [
         'vdpa',           # depends on common, bus and mempool.
         'event',          # depends on common, bus, mempool and net.
         'baseband',       # depends on common and bus.
+        'dma',            # depends on common and bus.
 ]
 
 if meson.is_cross_build()
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
new file mode 100644
index 0000000000..f8d54c6e74
--- /dev/null
+++ b/lib/dmadev/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited.
+
+sources = files('rte_dmadev.c')
+headers = files('rte_dmadev.h')
+driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
new file mode 100644
index 0000000000..42a4693bd9
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.c
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+
+#include "rte_dmadev.h"
+#include "rte_dmadev_pmd.h"
+
+static int16_t dma_devices_max;
+
+struct rte_dma_dev *rte_dma_devices;
+
+RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
+#define RTE_DMA_LOG(level, ...) \
+	rte_log(RTE_LOG_ ## level, rte_dma_logtype, RTE_FMT("dma: " \
+		RTE_FMT_HEAD(__VA_ARGS__,) "\n", RTE_FMT_TAIL(__VA_ARGS__,)))
+
+int
+rte_dma_dev_max(size_t dev_max)
+{
+	/* This function may be called before rte_eal_init(), so no rte library
+	 * function can be called in this function.
+	 */
+	if (dev_max == 0 || dev_max > INT16_MAX)
+		return -EINVAL;
+
+	if (dma_devices_max > 0)
+		return -EINVAL;
+
+	dma_devices_max = dev_max;
+
+	return 0;
+}
+
+static int
+dma_check_name(const char *name)
+{
+	size_t name_len;
+
+	if (name == NULL) {
+		RTE_DMA_LOG(ERR, "Name can't be NULL");
+		return -EINVAL;
+	}
+
+	name_len = strnlen(name, RTE_DEV_NAME_MAX_LEN);
+	if (name_len == 0) {
+		RTE_DMA_LOG(ERR, "Zero length DMA device name");
+		return -EINVAL;
+	}
+	if (name_len >= RTE_DEV_NAME_MAX_LEN) {
+		RTE_DMA_LOG(ERR, "DMA device name is too long");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int16_t
+dma_find_free_id(void)
+{
+	int16_t i;
+
+	if (rte_dma_devices == NULL)
+		return -1;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
+			return i;
+	}
+
+	return -1;
+}
+
+static struct rte_dma_dev*
+dma_find_by_name(const char *name)
+{
+	int16_t i;
+
+	if (rte_dma_devices == NULL)
+		return NULL;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
+		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+			return &rte_dma_devices[i];
+	}
+
+	return NULL;
+}
+
+static int
+dma_dev_data_prepare(void)
+{
+	size_t size;
+
+	if (rte_dma_devices != NULL)
+		return 0;
+
+	size = dma_devices_max * sizeof(struct rte_dma_dev);
+	rte_dma_devices = malloc(size);
+	if (rte_dma_devices == NULL)
+		return -ENOMEM;
+	memset(rte_dma_devices, 0, size);
+
+	return 0;
+}
+
+static int
+dma_data_prepare(void)
+{
+	if (dma_devices_max == 0)
+		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+	return dma_dev_data_prepare();
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+	void *dev_private;
+	int16_t dev_id;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	dev = dma_find_by_name(name);
+	if (dev != NULL) {
+		RTE_DMA_LOG(ERR, "DMA device already allocated");
+		return NULL;
+	}
+
+	dev_private = rte_zmalloc_socket(name, private_data_size,
+					 RTE_CACHE_LINE_SIZE, numa_node);
+	if (dev_private == NULL) {
+		RTE_DMA_LOG(ERR, "Cannot allocate private data");
+		return NULL;
+	}
+
+	dev_id = dma_find_free_id();
+	if (dev_id < 0) {
+		RTE_DMA_LOG(ERR, "Reached maximum number of DMA devices");
+		rte_free(dev_private);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[dev_id];
+	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
+	dev->dev_id = dev_id;
+	dev->numa_node = numa_node;
+	dev->dev_private = dev_private;
+
+	return dev;
+}
+
+static void
+dma_release(struct rte_dma_dev *dev)
+{
+	rte_free(dev->dev_private);
+	memset(dev, 0, sizeof(struct rte_dma_dev));
+}
+
+struct rte_dma_dev *
+rte_dma_pmd_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0 || private_data_size == 0)
+		return NULL;
+
+	dev = dma_allocate(name, numa_node, private_data_size);
+	if (dev == NULL)
+		return NULL;
+
+	dev->state = RTE_DMA_DEV_REGISTERED;
+
+	return dev;
+}
+
+int
+rte_dma_pmd_release(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find_by_name(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	dma_release(dev);
+	return 0;
+}
+
+int
+rte_dma_get_dev_id_by_name(const char *name)
+{
+	struct rte_dma_dev *dev;
+
+	if (dma_check_name(name) != 0)
+		return -EINVAL;
+
+	dev = dma_find_by_name(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	return dev->dev_id;
+}
+
+bool
+rte_dma_is_valid(int16_t dev_id)
+{
+	return (dev_id >= 0) && (dev_id < dma_devices_max) &&
+		rte_dma_devices != NULL &&
+		rte_dma_devices[dev_id].state != RTE_DMA_DEV_UNUSED;
+}
+
+uint16_t
+rte_dma_count_avail(void)
+{
+	uint16_t count = 0;
+	uint16_t i;
+
+	if (rte_dma_devices == NULL)
+		return count;
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED)
+			count++;
+	}
+
+	return count;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
new file mode 100644
index 0000000000..87810f2f08
--- /dev/null
+++ b/lib/dmadev/rte_dmadev.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021 Marvell International Ltd
+ * Copyright(c) 2021 SmartShare Systems
+ */
+
+#ifndef RTE_DMADEV_H
+#define RTE_DMADEV_H
+
+/**
+ * @file rte_dmadev.h
+ *
+ * DMA (Direct Memory Access) device API.
+ *
+ * The DMA framework is built on the following model:
+ *
+ *     ---------------   ---------------       ---------------
+ *     | virtual DMA |   | virtual DMA |       | virtual DMA |
+ *     | channel     |   | channel     |       | channel     |
+ *     ---------------   ---------------       ---------------
+ *            |                |                      |
+ *            ------------------                      |
+ *                     |                              |
+ *               ------------                    ------------
+ *               |  dmadev  |                    |  dmadev  |
+ *               ------------                    ------------
+ *                     |                              |
+ *            ------------------               ------------------
+ *            | HW DMA channel |               | HW DMA channel |
+ *            ------------------               ------------------
+ *                     |                              |
+ *                     --------------------------------
+ *                                     |
+ *                           ---------------------
+ *                           | HW DMA Controller |
+ *                           ---------------------
+ *
+ * The DMA controller could have multiple HW-DMA-channels (aka HW-DMA-queues);
+ * each HW-DMA-channel should be represented by a dmadev.
+ *
+ * The dmadev could create multiple virtual DMA channels; each virtual DMA
+ * channel represents a different transfer context. The DMA operation request
+ * must be submitted to the virtual DMA channel. E.g. an application could
+ * create virtual DMA channel 0 for the memory-to-memory transfer scenario,
+ * and virtual DMA channel 1 for the memory-to-device transfer scenario.
+ *
+ * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
+ * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
+ *
+ */
+
+#include <stdint.h>
+
+#include <rte_bitops.h>
+#include <rte_common.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum number of devices if rte_dma_dev_max() is not called. */
+#define RTE_DMADEV_DEFAULT_MAX 64
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure the maximum number of dmadevs.
+ * @note This function can be invoked before the primary process rte_eal_init()
+ * to change the maximum number of dmadevs. If not invoked, the maximum number
+ * of dmadevs defaults to RTE_DMADEV_DEFAULT_MAX.
+ *
+ * @param dev_max
+ *   maximum number of dmadevs.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dev_max(size_t dev_max);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the device identifier for the named DMA device.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   Returns DMA device identifier on success.
+ *   - <0: Failure to find named DMA device.
+ */
+__rte_experimental
+int rte_dma_get_dev_id_by_name(const char *name);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Check whether the dev_id is valid.
+ *
+ * @param dev_id
+ *   DMA device index.
+ *
+ * @return
+ *   - If the device index is valid (true) or not (false).
+ */
+__rte_experimental
+bool rte_dma_is_valid(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get the total number of DMA devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   The total number of usable DMA devices.
+ */
+__rte_experimental
+uint16_t rte_dma_count_avail(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_H */
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
new file mode 100644
index 0000000000..bb09382dce
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef RTE_DMADEV_PMD_H
+#define RTE_DMADEV_PMD_H
+
+/**
+ * @file
+ *
+ * DMA Device PMD interface
+ *
+ * Driver facing interface for a DMA device. These are not to be called directly
+ * by any application.
+ */
+
+#include "rte_dmadev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Possible states of a DMA device.
+ *
+ * @see struct rte_dma_dev::state
+ */
+enum rte_dma_dev_state {
+	RTE_DMA_DEV_UNUSED = 0, /**< Device is unused. */
+	/** Device is registered, but not ready to be used. */
+	RTE_DMA_DEV_REGISTERED,
+	/** Device is ready for use. This is set by the PMD. */
+	RTE_DMA_DEV_READY,
+};
+
+/**
+ * @internal
+ * The generic data structure associated with each DMA device.
+ */
+struct rte_dma_dev {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	void *dev_private; /**< PMD-specific private data. */
+	/** Device info supplied during device initialization. */
+	struct rte_device *device;
+	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+} __rte_cache_aligned;
+
+extern struct rte_dma_dev *rte_dma_devices;
+
+/**
+ * @internal
+ * Allocate a new dmadev slot for a DMA device and return the pointer to that
+ * slot for the driver to use.
+ *
+ * @param name
+ *   DMA device name.
+ * @param numa_node
+ *   Driver's private data's NUMA node.
+ * @param private_data_size
+ *   Driver's private data size.
+ *
+ * @return
+ *   A pointer to the DMA device slot in case of success,
+ *   NULL otherwise.
+ */
+__rte_internal
+struct rte_dma_dev *rte_dma_pmd_allocate(const char *name, int numa_node,
+					 size_t private_data_size);
+
+/**
+ * @internal
+ * Release the specified dmadev.
+ *
+ * @param name
+ *   DMA device name.
+ *
+ * @return
+ *   - 0 on success, negative on error.
+ */
+__rte_internal
+int rte_dma_pmd_release(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_DMADEV_PMD_H */
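
For illustration, a minimal sketch of how a driver probe routine might use the
allocate/release pair above; my_dma_probe(), my_dma_remove() and struct
my_dma_private are hypothetical names, and error handling is abbreviated:

	#include <errno.h>
	#include <rte_dmadev_pmd.h>

	struct my_dma_private {
		int hw_queue_id; /* hypothetical per-device hardware handle */
	};

	static int
	my_dma_probe(struct rte_device *rte_dev, const char *name, int numa_node)
	{
		struct rte_dma_dev *dev;

		/* Reserve a dmadev slot plus driver-private storage. */
		dev = rte_dma_pmd_allocate(name, numa_node,
					   sizeof(struct my_dma_private));
		if (dev == NULL)
			return -ENOMEM;

		dev->device = rte_dev;
		/* ... program the hardware via dev->dev_private ... */
		dev->state = RTE_DMA_DEV_READY;
		return 0;
	}

	static int
	my_dma_remove(const char *name)
	{
		/* Free the slot allocated above. */
		return rte_dma_pmd_release(name);
	}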
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
new file mode 100644
index 0000000000..f8a0076468
--- /dev/null
+++ b/lib/dmadev/version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+	global:
+
+	rte_dma_count_avail;
+	rte_dma_dev_max;
+	rte_dma_get_dev_id_by_name;
+	rte_dma_is_valid;
+
+	local: *;
+};
+
+INTERNAL {
+	global:
+
+	rte_dma_devices;
+	rte_dma_pmd_allocate;
+	rte_dma_pmd_release;
+
+	local: *;
+};
diff --git a/lib/meson.build b/lib/meson.build
index b2ba7258d8..3b8b099820 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -45,6 +45,7 @@ libraries = [
         'pdump',
         'rawdev',
         'regexdev',
+        'dmadev',
         'rib',
         'reorder',
         'sched',
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v26 2/6] dmadev: add control plane API support
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 1/6] dmadev: introduce DMA device library Chengwen Feng
@ 2021-10-13 12:24   ` Chengwen Feng
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 3/6] dmadev: add data " Chengwen Feng
                     ` (4 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-13 12:24 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds control plane APIs for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst       |  10 +
 doc/guides/rel_notes/release_21_11.rst |   2 +
 lib/dmadev/rte_dmadev.c                | 361 +++++++++++++++++++
 lib/dmadev/rte_dmadev.h                | 465 +++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_pmd.h            |  61 ++++
 lib/dmadev/version.map                 |   9 +
 6 files changed, 908 insertions(+)

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index 46b85ce217..b1b5bff639 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -56,3 +56,13 @@ identifiers:
 
 - A device name used to designate the DMA device in console messages, for
   administration or debugging purposes.
+
+
+Device Features and Capabilities
+--------------------------------
+
+DMA devices may support different feature sets. The ``rte_dma_info_get`` API
+can be used to get the device info and supported features.
+
+Silent mode is a special device capability which does not require the
+application to invoke dequeue APIs.
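
For illustration, a minimal sketch (assuming a valid dev_id) of querying the
capabilities with rte_dma_info_get() before requesting silent mode; error
handling is abbreviated:

	struct rte_dma_info info;
	struct rte_dma_conf conf = { .nb_vchans = 1 };

	if (rte_dma_info_get(dev_id, &info) != 0)
		return -1;
	/* Request silent mode only when the device advertises it. */
	if (info.dev_capa & RTE_DMA_CAPA_SILENT)
		conf.enable_silent = true;
	if (rte_dma_configure(dev_id, &conf) != 0)
		return -1;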
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 1c7c1d6eab..2953bc3e97 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -80,6 +80,8 @@ New Features
 
   * Added a DMA device framework for management and provision of
     hardware and software DMA devices.
+  * Added generic APIs which support a number of different DMA
+    operations.
 
 * **Updated af_packet ethdev driver.**
 
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 42a4693bd9..9223ae1c95 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -201,6 +201,9 @@ rte_dma_pmd_release(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
+	if (dev->state == RTE_DMA_DEV_READY)
+		return rte_dma_close(dev->dev_id);
+
 	dma_release(dev);
 	return 0;
 }
@@ -244,3 +247,361 @@ rte_dma_count_avail(void)
 
 	return count;
 }
+
+int
+rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || dev_info == NULL)
+		return -EINVAL;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_info_get, -ENOTSUP);
+	memset(dev_info, 0, sizeof(struct rte_dma_info));
+	ret = (*dev->dev_ops->dev_info_get)(dev, dev_info,
+					    sizeof(struct rte_dma_info));
+	if (ret != 0)
+		return ret;
+
+	dev_info->dev_name = dev->dev_name;
+	dev_info->numa_node = dev->device->numa_node;
+	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+
+	return 0;
+}
+
+int
+rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans == 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configured with zero vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->nb_vchans > dev_info.max_vchans) {
+		RTE_DMA_LOG(ERR,
+			"Device %d configured with too many vchans", dev_id);
+		return -EINVAL;
+	}
+	if (dev_conf->enable_silent &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_SILENT)) {
+		RTE_DMA_LOG(ERR, "Device %d doesn't support silent mode", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
+					     sizeof(struct rte_dma_conf));
+	if (ret == 0)
+		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+
+	return ret;
+}
+
+int
+rte_dma_start(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_start == NULL)
+		goto mark_started;
+
+	ret = (*dev->dev_ops->dev_start)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_started:
+	dev->dev_started = 1;
+	return 0;
+}
+
+int
+rte_dma_stop(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (dev->dev_started == 0) {
+		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
+		return 0;
+	}
+
+	if (dev->dev_ops->dev_stop == NULL)
+		goto mark_stopped;
+
+	ret = (*dev->dev_ops->dev_stop)(dev);
+	if (ret != 0)
+		return ret;
+
+mark_stopped:
+	dev->dev_started = 0;
+	return 0;
+}
+
+int
+rte_dma_close(int16_t dev_id)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	/* Device must be stopped before it can be closed */
+	if (dev->dev_started == 1) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped before closing", dev_id);
+		return -EBUSY;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+	ret = (*dev->dev_ops->dev_close)(dev);
+	if (ret == 0)
+		dma_release(dev);
+
+	return ret;
+}
+
+int
+rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	bool src_is_dev, dst_is_dev;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || conf == NULL)
+		return -EINVAL;
+
+	if (dev->dev_started != 0) {
+		RTE_DMA_LOG(ERR,
+			"Device %d must be stopped to allow configuration",
+			dev_id);
+		return -EBUSY;
+	}
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
+		return -EINVAL;
+	}
+	if (dev->dev_conf.nb_vchans == 0) {
+		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
+		return -EINVAL;
+	}
+	if (vchan >= dev_info.nb_vchans) {
+		RTE_DMA_LOG(ERR, "Device %d vchan out of range", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction != RTE_DMA_DIR_MEM_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_MEM_TO_DEV &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_MEM &&
+	    conf->direction != RTE_DMA_DIR_DEV_TO_DEV) {
+		RTE_DMA_LOG(ERR, "Device %d direction invalid!", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support mem2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_MEM_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support mem2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_MEM &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_MEM)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support dev2mem transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->direction == RTE_DMA_DIR_DEV_TO_DEV &&
+	    !(dev_info.dev_capa & RTE_DMA_CAPA_DEV_TO_DEV)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d doesn't support dev2dev transfer", dev_id);
+		return -EINVAL;
+	}
+	if (conf->nb_desc < dev_info.min_desc ||
+	    conf->nb_desc > dev_info.max_desc) {
+		RTE_DMA_LOG(ERR,
+			"Device %d number of descriptors invalid", dev_id);
+		return -EINVAL;
+	}
+	src_is_dev = conf->direction == RTE_DMA_DIR_DEV_TO_MEM ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->src_port.port_type == RTE_DMA_PORT_NONE && src_is_dev) ||
+	    (conf->src_port.port_type != RTE_DMA_PORT_NONE && !src_is_dev)) {
+		RTE_DMA_LOG(ERR, "Device %d source port type invalid", dev_id);
+		return -EINVAL;
+	}
+	dst_is_dev = conf->direction == RTE_DMA_DIR_MEM_TO_DEV ||
+		     conf->direction == RTE_DMA_DIR_DEV_TO_DEV;
+	if ((conf->dst_port.port_type == RTE_DMA_PORT_NONE && dst_is_dev) ||
+	    (conf->dst_port.port_type != RTE_DMA_PORT_NONE && !dst_is_dev)) {
+		RTE_DMA_LOG(ERR,
+			"Device %d destination port type invalid", dev_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vchan_setup, -ENOTSUP);
+	return (*dev->dev_ops->vchan_setup)(dev, vchan, conf,
+					sizeof(struct rte_dma_vchan_conf));
+}
+
+int
+rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	if (!rte_dma_is_valid(dev_id) || stats == NULL)
+		return -EINVAL;
+
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	memset(stats, 0, sizeof(struct rte_dma_stats));
+	return (*dev->dev_ops->stats_get)(dev, vchan, stats,
+					  sizeof(struct rte_dma_stats));
+}
+
+int
+rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+
+	if (vchan >= dev->dev_conf.nb_vchans &&
+	    vchan != RTE_DMA_ALL_VCHAN) {
+		RTE_DMA_LOG(ERR,
+			"Device %d vchan %u out of range", dev_id, vchan);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
+	return (*dev->dev_ops->stats_reset)(dev, vchan);
+}
+
+static const char *
+dma_capability_name(uint64_t capability)
+{
+	static const struct {
+		uint64_t capability;
+		const char *name;
+	} capa_names[] = {
+		{ RTE_DMA_CAPA_MEM_TO_MEM,  "mem2mem" },
+		{ RTE_DMA_CAPA_MEM_TO_DEV,  "mem2dev" },
+		{ RTE_DMA_CAPA_DEV_TO_MEM,  "dev2mem" },
+		{ RTE_DMA_CAPA_DEV_TO_DEV,  "dev2dev" },
+		{ RTE_DMA_CAPA_SVA,         "sva"     },
+		{ RTE_DMA_CAPA_SILENT,      "silent"  },
+		{ RTE_DMA_CAPA_OPS_COPY,    "copy"    },
+		{ RTE_DMA_CAPA_OPS_COPY_SG, "copy_sg" },
+		{ RTE_DMA_CAPA_OPS_FILL,    "fill"    },
+	};
+
+	const char *name = "unknown";
+	uint32_t i;
+
+	for (i = 0; i < RTE_DIM(capa_names); i++) {
+		if (capability == capa_names[i].capability) {
+			name = capa_names[i].name;
+			break;
+		}
+	}
+
+	return name;
+}
+
+static void
+dma_dump_capability(FILE *f, uint64_t dev_capa)
+{
+	uint64_t capa;
+
+	(void)fprintf(f, "  dev_capa: 0x%" PRIx64 " -", dev_capa);
+	while (dev_capa > 0) {
+		capa = 1ull << __builtin_ctzll(dev_capa);
+		(void)fprintf(f, " %s", dma_capability_name(capa));
+		dev_capa &= ~capa;
+	}
+	(void)fprintf(f, "\n");
+}
+
+int
+rte_dma_dump(int16_t dev_id, FILE *f)
+{
+	const struct rte_dma_dev *dev = &rte_dma_devices[dev_id];
+	struct rte_dma_info dev_info;
+	int ret;
+
+	if (!rte_dma_is_valid(dev_id) || f == NULL)
+		return -EINVAL;
+
+	ret = rte_dma_info_get(dev_id, &dev_info);
+	if (ret != 0) {
+		RTE_DMA_LOG(ERR, "Device %d failed to get device info", dev_id);
+		return -EINVAL;
+	}
+
+	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
+		dev->dev_id,
+		dev->dev_name,
+		dev->dev_started ? "started" : "stopped");
+	dma_dump_capability(f, dev_info.dev_capa);
+	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
+	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
+	(void)fprintf(f, "  silent_mode: %s\n",
+		dev->dev_conf.enable_silent ? "on" : "off");
+
+	if (dev->dev_ops->dev_dump != NULL)
+		return (*dev->dev_ops->dev_dump)(dev, f);
+
+	return 0;
+}
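
As a usage sketch for the statistics and dump APIs above (assuming a valid
dev_id, plus <stdio.h> and <inttypes.h> for printf/PRIu64):

	struct rte_dma_stats stats;

	if (rte_dma_stats_get(dev_id, RTE_DMA_ALL_VCHAN, &stats) == 0)
		printf("submitted=%" PRIu64 " completed=%" PRIu64
		       " errors=%" PRIu64 "\n",
		       stats.submitted, stats.completed, stats.errors);
	rte_dma_stats_reset(dev_id, RTE_DMA_ALL_VCHAN);
	rte_dma_dump(dev_id, stdout); /* human-readable device state */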
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 87810f2f08..03ed304709 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -48,6 +48,29 @@
  * This framework uses 'int16_t dev_id' as the device identifier of a dmadev,
  * and 'uint16_t vchan' as the virtual DMA channel identifier in one dmadev.
  *
+ * The functions exported by the dmadev API to setup a device designated by its
+ * device identifier must be invoked in the following order:
+ *     - rte_dma_configure()
+ *     - rte_dma_vchan_setup()
+ *     - rte_dma_start()
+ *
+ * Then, the application can invoke dataplane functions to process jobs.
+ *
+ * If the application wants to change the configuration (i.e. invoke
+ * rte_dma_configure() or rte_dma_vchan_setup()), it must invoke
+ * rte_dma_stop() first to stop the device and then do the reconfiguration
+ * before invoking rte_dma_start() again. The dataplane functions should not
+ * be invoked when the device is stopped.
+ *
+ * Finally, an application can close a dmadev by invoking the rte_dma_close()
+ * function.
+ *
+ * Regarding MT-safety: all dmadev API functions implemented by a PMD are
+ * lock-free and assume they are not invoked in parallel on different
+ * logical cores to work on the same target dmadev object.
+ * @note Different virtual DMA channels on the same dmadev *DO NOT* support
+ * parallel invocation, because these virtual DMA channels share the same
+ * HW-DMA-channel.
  */
 
 #include <stdint.h>
@@ -126,6 +149,448 @@ bool rte_dma_is_valid(int16_t dev_id);
 __rte_experimental
 uint16_t rte_dma_count_avail(void);
 
+/**@{@name DMA capability
+ * @see struct rte_dma_info::dev_capa
+ */
+/** Support memory-to-memory transfer. */
+#define RTE_DMA_CAPA_MEM_TO_MEM		RTE_BIT64(0)
+/** Support memory-to-device transfer. */
+#define RTE_DMA_CAPA_MEM_TO_DEV		RTE_BIT64(1)
+/** Support device-to-memory transfer. */
+#define RTE_DMA_CAPA_DEV_TO_MEM		RTE_BIT64(2)
+/** Support device-to-device transfer. */
+#define RTE_DMA_CAPA_DEV_TO_DEV		RTE_BIT64(3)
+/** Support SVA, which allows a VA to be used as the DMA address.
+ * If the device supports SVA, the application can pass any VA address, such
+ * as memory from rte_malloc(), rte_memzone(), malloc() or the stack.
+ * If the device does not support SVA, the application must pass an IOVA
+ * address obtained from rte_malloc() or rte_memzone().
+ */
+#define RTE_DMA_CAPA_SVA                RTE_BIT64(4)
+/** Support working in silent mode.
+ * In this mode, the application is not required to invoke the
+ * rte_dma_completed*() APIs.
+ * @see struct rte_dma_conf::enable_silent
+ */
+#define RTE_DMA_CAPA_SILENT             RTE_BIT64(5)
+/** Support copy operation.
+ * This capability starts at bit index 32, leaving a gap between the normal
+ * capabilities and the operation capabilities.
+ */
+#define RTE_DMA_CAPA_OPS_COPY           RTE_BIT64(32)
+/** Support scatter-gather list copy operation. */
+#define RTE_DMA_CAPA_OPS_COPY_SG	RTE_BIT64(33)
+/** Support fill operation. */
+#define RTE_DMA_CAPA_OPS_FILL		RTE_BIT64(34)
+/**@}*/
+
+/**
+ * A structure used to retrieve the information of a DMA device.
+ *
+ * @see rte_dma_info_get
+ */
+struct rte_dma_info {
+	const char *dev_name; /**< Unique device name. */
+	/** Device capabilities (RTE_DMA_CAPA_*). */
+	uint64_t dev_capa;
+	/** Maximum number of virtual DMA channels supported. */
+	uint16_t max_vchans;
+	/** Maximum allowed number of virtual DMA channel descriptors. */
+	uint16_t max_desc;
+	/** Minimum allowed number of virtual DMA channel descriptors. */
+	uint16_t min_desc;
+	/** Maximum number of source or destination scatter-gather entries
+	 * supported.
+	 * If the device does not support COPY_SG capability, this value can be
+	 * zero.
+	 * If the device supports COPY_SG capability, then rte_dma_copy_sg()
+	 * parameter nb_src/nb_dst should not exceed this value.
+	 */
+	uint16_t max_sges;
+	/** NUMA node connection, -1 if unknown. */
+	int16_t numa_node;
+	/** Number of virtual DMA channels configured. */
+	uint16_t nb_vchans;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve information of a DMA device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param[out] dev_info
+ *   A pointer to a structure of type *rte_dma_info* to be filled with the
+ *   information of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
+
+/**
+ * A structure used to configure a DMA device.
+ *
+ * @see rte_dma_configure
+ */
+struct rte_dma_conf {
+	/** The number of virtual DMA channels to set up for the DMA device.
+	 * This value cannot be greater than the field 'max_vchans' of struct
+	 * rte_dma_info obtained from rte_dma_info_get().
+	 */
+	uint16_t nb_vchans;
+	/** Indicates whether to enable silent mode.
+	 * false - default mode, true - silent mode.
+	 * This value can be set to true only when the SILENT capability is
+	 * supported.
+	 *
+	 * @see RTE_DMA_CAPA_SILENT
+	 */
+	bool enable_silent;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Configure a DMA device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device to configure.
+ * @param dev_conf
+ *   The DMA device configuration structure encapsulated into rte_dma_conf
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Start a DMA device.
+ *
+ * The device start step is the last one and consists of setting the DMA
+ * to start accepting jobs.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_start(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Stop a DMA device.
+ *
+ * The device can be restarted with a call to rte_dma_start().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stop(int16_t dev_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Close a DMA device.
+ *
+ * The device cannot be restarted after this call.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_close(int16_t dev_id);
+
+/**
+ * DMA transfer direction defines.
+ *
+ * @see struct rte_dma_vchan_conf::direction
+ */
+enum rte_dma_direction {
+	/** DMA transfer direction - from memory to memory.
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_MEM,
+	/** DMA transfer direction - from memory to device.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from memory
+	 * (the SoC's memory) to a device (host memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_MEM_TO_DEV,
+	/** DMA transfer direction - from device to memory.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from a device
+	 * (host memory) to memory (the SoC's memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_MEM,
+	/** DMA transfer direction - from device to device.
+	 * In a typical scenario, the SoC is installed on a host server as an
+	 * iNIC through the PCIe interface. In this case, the SoC works in
+	 * EP (endpoint) mode and can initiate a DMA move request from one
+	 * device (host memory) to another device (another host's memory).
+	 *
+	 * @see struct rte_dma_vchan_conf::direction
+	 */
+	RTE_DMA_DIR_DEV_TO_DEV,
+};
+
+/**
+ * DMA access port type defines.
+ *
+ * @see struct rte_dma_port_param::port_type
+ */
+enum rte_dma_port_type {
+	RTE_DMA_PORT_NONE, /**< No DMA access port (memory address). */
+	RTE_DMA_PORT_PCIE, /**< The DMA access port is PCIe. */
+};
+
+/**
+ * A structure used to describe DMA access port parameters.
+ *
+ * @see struct rte_dma_vchan_conf::src_port
+ * @see struct rte_dma_vchan_conf::dst_port
+ */
+struct rte_dma_port_param {
+	/** The device access port type.
+	 *
+	 * @see enum rte_dma_port_type
+	 */
+	enum rte_dma_port_type port_type;
+	RTE_STD_C11
+	union {
+		/** PCIe access port parameters.
+		 *
+		 * The following model shows SoC's PCIe module connects to
+		 * multiple PCIe hosts and multiple endpoints. The PCIe module
+		 * has an integrated DMA controller.
+		 *
+		 * If the DMA wants to access the memory of host A, it can be
+		 * initiated by PF1 in core0, or by VF0 of PF0 in core0.
+		 *
+		 * \code{.unparsed}
+		 * System Bus
+		 *    |     ----------PCIe module----------
+		 *    |     Bus
+		 *    |     Interface
+		 *    |     -----        ------------------
+		 *    |     |   |        | PCIe Core0     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host A  |
+		 *    |     |   |--------|        |- VF-1 |--------| Root    |
+		 *    |     |   |        |   PF-1         |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |   |        | PCIe Core1     |
+		 *    |     |   |        |                |        -----------
+		 *    |     |   |        |   PF-0 -- VF-0 |        | Host B  |
+		 *    |-----|   |--------|   PF-1 -- VF-0 |--------| Root    |
+		 *    |     |   |        |        |- VF-1 |        | Complex |
+		 *    |     |   |        |   PF-2         |        -----------
+		 *    |     |   |        ------------------
+		 *    |     |   |
+		 *    |     |   |        ------------------
+		 *    |     |DMA|        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |--------| PCIe Core2     |        ------
+		 *    |     |   |        |                |        ------
+		 *    |     |   |        |                |--------| EP |
+		 *    |     |   |        |                |        ------
+		 *    |     -----        ------------------
+		 *
+		 * \endcode
+		 *
+		 * @note If some fields cannot be supported by the
+		 * hardware/driver, then the driver ignores those fields.
+		 * Please check driver-specific documentation for limitations
+		 * and capabilities.
+		 */
+		__extension__
+		struct {
+			uint64_t coreid : 4; /**< PCIe core id used. */
+			uint64_t pfid : 8; /**< PF id used. */
+			uint64_t vfen : 1; /**< VF enable bit. */
+			uint64_t vfid : 16; /**< VF id used. */
+			/** The PASID field in the TLP packet. */
+			uint64_t pasid : 20;
+			/** The attributes field in the TLP packet. */
+			uint64_t attr : 3;
+			/** The processing hint field in the TLP packet. */
+			uint64_t ph : 2;
+			/** The steering tag field in the TLP packet. */
+			uint64_t st : 16;
+		} pcie;
+	};
+	uint64_t reserved[2]; /**< Reserved for future fields. */
+};
+
+/**
+ * A structure used to configure a virtual DMA channel.
+ *
+ * @see rte_dma_vchan_setup
+ */
+struct rte_dma_vchan_conf {
+	/** Transfer direction
+	 *
+	 * @see enum rte_dma_direction
+	 */
+	enum rte_dma_direction direction;
+	/** Number of descriptors for the virtual DMA channel. */
+	uint16_t nb_desc;
+	/** 1) Used to describe the device access port parameters in the
+	 * device-to-memory transfer scenario.
+	 * 2) Used to describe the source device access port parameters in
+	 * the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param src_port;
+	/** 1) Used to describe the device access port parameters in the
+	 * memory-to-device transfer scenario.
+	 * 2) Used to describe the destination device access port parameters
+	 * in the device-to-device transfer scenario.
+	 *
+	 * @see struct rte_dma_port_param
+	 */
+	struct rte_dma_port_param dst_port;
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate and set up a virtual DMA channel.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel. The value must be in the range
+ *   [0, nb_vchans - 1] previously supplied to rte_dma_configure().
+ * @param conf
+ *   The virtual DMA channel configuration structure encapsulated into
+ *   rte_dma_vchan_conf object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
+			const struct rte_dma_vchan_conf *conf);
+
+/**
+ * A structure used to retrieve statistics.
+ *
+ * @see rte_dma_stats_get
+ */
+struct rte_dma_stats {
+	/** Count of operations which were submitted to hardware. */
+	uint64_t submitted;
+	/** Count of operations which were completed, including successful and
+	 * failed completions.
+	 */
+	uint64_t completed;
+	/** Count of operations which failed to complete. */
+	uint64_t errors;
+};
+
+/**
+ * Special ID, which is used to represent all virtual DMA channels.
+ *
+ * @see rte_dma_stats_get
+ * @see rte_dma_stats_reset
+ */
+#define RTE_DMA_ALL_VCHAN	0xFFFFu
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMA_ALL_VCHAN, it means all channels.
+ * @param[out] stats
+ *   The basic statistics structure encapsulated into rte_dma_stats
+ *   object.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
+		      struct rte_dma_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Reset basic statistics of one or all virtual DMA channels.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *   If equal to RTE_DMA_ALL_VCHAN, it means all channels.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dump DMA device info.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param f
+ *   The file to write the output to.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+int rte_dma_dump(int16_t dev_id, FILE *f);
+
 #ifdef __cplusplus
 }
 #endif
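
Putting the setup order described above together, a minimal sketch for a
single mem-to-mem virtual channel; the nb_desc value of 1024 is arbitrary and
must lie within [min_desc, max_desc] reported by rte_dma_info_get(), and
return codes are ignored here for brevity:

	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
	struct rte_dma_vchan_conf vchan_conf = {
		.direction = RTE_DMA_DIR_MEM_TO_MEM,
		.nb_desc = 1024,
	};

	rte_dma_configure(dev_id, &dev_conf);
	rte_dma_vchan_setup(dev_id, 0, &vchan_conf);
	rte_dma_start(dev_id);
	/* ... dataplane work (added later in this series) ... */
	rte_dma_stop(dev_id);
	rte_dma_close(dev_id);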
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index bb09382dce..5fcf0f60b8 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -20,6 +20,62 @@
 extern "C" {
 #endif
 
+struct rte_dma_dev;
+
+/** @internal Used to get device information of a device. */
+typedef int (*rte_dma_info_get_t)(const struct rte_dma_dev *dev,
+				  struct rte_dma_info *dev_info,
+				  uint32_t info_sz);
+
+/** @internal Used to configure a device. */
+typedef int (*rte_dma_configure_t)(struct rte_dma_dev *dev,
+				   const struct rte_dma_conf *dev_conf,
+				   uint32_t conf_sz);
+
+/** @internal Used to start a configured device. */
+typedef int (*rte_dma_start_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to stop a configured device. */
+typedef int (*rte_dma_stop_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to close a configured device. */
+typedef int (*rte_dma_close_t)(struct rte_dma_dev *dev);
+
+/** @internal Used to allocate and set up a virtual DMA channel. */
+typedef int (*rte_dma_vchan_setup_t)(struct rte_dma_dev *dev, uint16_t vchan,
+				const struct rte_dma_vchan_conf *conf,
+				uint32_t conf_sz);
+
+/** @internal Used to retrieve basic statistics. */
+typedef int (*rte_dma_stats_get_t)(const struct rte_dma_dev *dev,
+			uint16_t vchan, struct rte_dma_stats *stats,
+			uint32_t stats_sz);
+
+/** @internal Used to reset basic statistics. */
+typedef int (*rte_dma_stats_reset_t)(struct rte_dma_dev *dev, uint16_t vchan);
+
+/** @internal Used to dump internal information. */
+typedef int (*rte_dma_dump_t)(const struct rte_dma_dev *dev, FILE *f);
+
+/**
+ * DMA device operations function pointer table.
+ *
+ * @see struct rte_dma_dev::dev_ops
+ */
+struct rte_dma_dev_ops {
+	rte_dma_info_get_t         dev_info_get;
+	rte_dma_configure_t        dev_configure;
+	rte_dma_start_t            dev_start;
+	rte_dma_stop_t             dev_stop;
+	rte_dma_close_t            dev_close;
+
+	rte_dma_vchan_setup_t      vchan_setup;
+
+	rte_dma_stats_get_t        stats_get;
+	rte_dma_stats_reset_t      stats_reset;
+
+	rte_dma_dump_t             dev_dump;
+};
 /**
  * Possible states of a DMA device.
  *
@@ -44,7 +100,12 @@ struct rte_dma_dev {
 	void *dev_private; /**< PMD-specific private data. */
 	/** Device info supplied during device initialization. */
 	struct rte_device *device;
+	/** Functions implemented by PMD. */
+	const struct rte_dma_dev_ops *dev_ops;
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
+	__extension__
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index f8a0076468..e925dfcd6d 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -1,10 +1,19 @@
 EXPERIMENTAL {
 	global:
 
+	rte_dma_close;
+	rte_dma_configure;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
+	rte_dma_dump;
 	rte_dma_get_dev_id_by_name;
+	rte_dma_info_get;
 	rte_dma_is_valid;
+	rte_dma_start;
+	rte_dma_stats_get;
+	rte_dma_stats_reset;
+	rte_dma_stop;
+	rte_dma_vchan_setup;
 
 	local: *;
 };
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v26 3/6] dmadev: add data plane API support
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 1/6] dmadev: introduce DMA device library Chengwen Feng
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 2/6] dmadev: add control plane API support Chengwen Feng
@ 2021-10-13 12:24   ` Chengwen Feng
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 4/6] dmadev: add multi-process support Chengwen Feng
                     ` (3 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-13 12:24 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds data plane APIs for dmadev.

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/prog_guide/dmadev.rst |  22 ++
 lib/dmadev/meson.build           |   1 +
 lib/dmadev/rte_dmadev.c          | 112 ++++++++
 lib/dmadev/rte_dmadev.h          | 451 +++++++++++++++++++++++++++++++
 lib/dmadev/rte_dmadev_core.h     |  76 ++++++
 lib/dmadev/rte_dmadev_pmd.h      |   2 +
 lib/dmadev/version.map           |   7 +
 7 files changed, 671 insertions(+)
 create mode 100644 lib/dmadev/rte_dmadev_core.h

diff --git a/doc/guides/prog_guide/dmadev.rst b/doc/guides/prog_guide/dmadev.rst
index b1b5bff639..32f7147862 100644
--- a/doc/guides/prog_guide/dmadev.rst
+++ b/doc/guides/prog_guide/dmadev.rst
@@ -66,3 +66,25 @@ can be used to get the device info and supported features.
 
 Silent mode is a special device capability which does not require the
 application to invoke dequeue APIs.
+
+
+Enqueue / Dequeue APIs
+~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueue APIs such as ``rte_dma_copy`` and ``rte_dma_fill`` can be used to
+enqueue operations to hardware. If an enqueue is successful, a ``ring_idx`` is
+returned. This ``ring_idx`` can be used by applications to track per-operation
+metadata in an application-defined circular ring.
+
+The ``rte_dma_submit`` API is used to issue the doorbell to hardware.
+Alternatively the ``RTE_DMA_OP_FLAG_SUBMIT`` flag can be passed to the enqueue
+APIs to also issue the doorbell to hardware.
+
+There are two dequeue APIs, ``rte_dma_completed`` and
+``rte_dma_completed_status``, which are used to obtain the results of the
+enqueue requests. ``rte_dma_completed`` will return the number of successfully
+completed operations. ``rte_dma_completed_status`` will return the number of
+completed operations along with the status of each operation (filled into the
+``status`` array passed by the user). These two APIs can also return the last
+completed operation's ``ring_idx``, which can help users track operations
+within their own application-defined rings.
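
As a usage sketch of this flow (src_iova, dst_iova and len are assumed to be
prepared by the application; virtual channel 0 is used):

	uint16_t last_idx;
	bool has_error = false;
	int ring_idx;

	/* Enqueue one copy and ring the doorbell in the same call. */
	ring_idx = rte_dma_copy(dev_id, 0, src_iova, dst_iova, len,
				RTE_DMA_OP_FLAG_SUBMIT);
	if (ring_idx < 0)
		return ring_idx; /* e.g. -ENOSPC when the ring is full */

	/* Busy-poll until the operation completes. */
	while (rte_dma_completed(dev_id, 0, 1, &last_idx, &has_error) == 0)
		;
	if (has_error)
		/* inspect per-operation codes via rte_dma_completed_status() */;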
diff --git a/lib/dmadev/meson.build b/lib/dmadev/meson.build
index f8d54c6e74..d2fc85e8c7 100644
--- a/lib/dmadev/meson.build
+++ b/lib/dmadev/meson.build
@@ -3,4 +3,5 @@
 
 sources = files('rte_dmadev.c')
 headers = files('rte_dmadev.h')
+indirect_headers += files('rte_dmadev_core.h')
 driver_sdk_headers += files('rte_dmadev_pmd.h')
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 9223ae1c95..54e15817fa 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -17,6 +17,7 @@
 
 static int16_t dma_devices_max;
 
+struct rte_dma_fp_object *rte_dma_fp_objs;
 struct rte_dma_dev *rte_dma_devices;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
@@ -97,6 +98,38 @@ dma_find_by_name(const char *name)
 	return NULL;
 }
 
+static void dma_fp_object_dummy(struct rte_dma_fp_object *obj);
+
+static int
+dma_fp_data_prepare(void)
+{
+	size_t size;
+	void *ptr;
+	int i;
+
+	if (rte_dma_fp_objs != NULL)
+		return 0;
+
+	/* Fast-path objects must be cache-line aligned, but the return value
+	 * of malloc may not be aligned to the cache line. Therefore, extra
+	 * memory is allocated so the pointer can be realigned.
+	 * Note: we do not call posix_memalign/aligned_alloc because their
+	 * availability depends on the libc version.
+	 */
+	size = dma_devices_max * sizeof(struct rte_dma_fp_object) +
+		RTE_CACHE_LINE_SIZE;
+	ptr = malloc(size);
+	if (ptr == NULL)
+		return -ENOMEM;
+	memset(ptr, 0, size);
+
+	rte_dma_fp_objs = RTE_PTR_ALIGN(ptr, RTE_CACHE_LINE_SIZE);
+	for (i = 0; i < dma_devices_max; i++)
+		dma_fp_object_dummy(&rte_dma_fp_objs[i]);
+
+	return 0;
+}
+
 static int
 dma_dev_data_prepare(void)
 {
@@ -117,8 +150,15 @@ dma_dev_data_prepare(void)
 static int
 dma_data_prepare(void)
 {
+	int ret;
+
 	if (dma_devices_max == 0)
 		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+
+	ret = dma_fp_data_prepare();
+	if (ret)
+		return ret;
+
 	return dma_dev_data_prepare();
 }
 
@@ -161,6 +201,8 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 	dev->dev_id = dev_id;
 	dev->numa_node = numa_node;
 	dev->dev_private = dev_private;
+	dev->fp_obj = &rte_dma_fp_objs[dev_id];
+	dma_fp_object_dummy(dev->fp_obj);
 
 	return dev;
 }
@@ -169,6 +211,7 @@ static void
 dma_release(struct rte_dma_dev *dev)
 {
 	rte_free(dev->dev_private);
+	dma_fp_object_dummy(dev->fp_obj);
 	memset(dev, 0, sizeof(struct rte_dma_dev));
 }
 
@@ -605,3 +648,72 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 
 	return 0;
 }
+
+static int
+dummy_copy(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	   __rte_unused rte_iova_t src, __rte_unused rte_iova_t dst,
+	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "copy is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_copy_sg(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	      __rte_unused const struct rte_dma_sge *src,
+	      __rte_unused const struct rte_dma_sge *dst,
+	      __rte_unused uint16_t nb_src, __rte_unused uint16_t nb_dst,
+	      __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "copy_sg is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_fill(__rte_unused void *dev_private, __rte_unused uint16_t vchan,
+	   __rte_unused uint64_t pattern, __rte_unused rte_iova_t dst,
+	   __rte_unused uint32_t length, __rte_unused uint64_t flags)
+{
+	RTE_DMA_LOG(ERR, "fill is not configured or not supported.");
+	return -EINVAL;
+}
+
+static int
+dummy_submit(__rte_unused void *dev_private, __rte_unused uint16_t vchan)
+{
+	RTE_DMA_LOG(ERR, "submit is not configured or not supported.");
+	return -EINVAL;
+}
+
+static uint16_t
+dummy_completed(__rte_unused void *dev_private,	__rte_unused uint16_t vchan,
+		__rte_unused const uint16_t nb_cpls,
+		__rte_unused uint16_t *last_idx, __rte_unused bool *has_error)
+{
+	RTE_DMA_LOG(ERR, "completed is not configured or not supported.");
+	return 0;
+}
+
+static uint16_t
+dummy_completed_status(__rte_unused void *dev_private,
+		       __rte_unused uint16_t vchan,
+		       __rte_unused const uint16_t nb_cpls,
+		       __rte_unused uint16_t *last_idx,
+		       __rte_unused enum rte_dma_status_code *status)
+{
+	RTE_DMA_LOG(ERR,
+		    "completed_status is not configured or not supported.");
+	return 0;
+}
+
+static void
+dma_fp_object_dummy(struct rte_dma_fp_object *obj)
+{
+	obj->dev_private      = NULL;
+	obj->copy             = dummy_copy;
+	obj->copy_sg          = dummy_copy_sg;
+	obj->fill             = dummy_fill;
+	obj->submit           = dummy_submit;
+	obj->completed        = dummy_completed;
+	obj->completed_status = dummy_completed_status;
+}
diff --git a/lib/dmadev/rte_dmadev.h b/lib/dmadev/rte_dmadev.h
index 03ed304709..e46c001404 100644
--- a/lib/dmadev/rte_dmadev.h
+++ b/lib/dmadev/rte_dmadev.h
@@ -65,6 +65,77 @@
  * Finally, an application can close a dmadev by invoking the rte_dma_close()
  * function.
  *
+ * The dataplane APIs include two parts:
+ * The first part is the submission of operation requests:
+ *     - rte_dma_copy()
+ *     - rte_dma_copy_sg()
+ *     - rte_dma_fill()
+ *     - rte_dma_submit()
+ *
+ * These APIs can work with different virtual DMA channels, which have
+ * different contexts.
+ *
+ * The first three APIs are used to submit an operation request to a virtual
+ * DMA channel. If the submission is successful, a non-negative
+ * ring_idx <= UINT16_MAX is returned; otherwise a negative number is returned.
+ *
+ * The last API is used to issue the doorbell to hardware; alternatively, the
+ * flags parameter of the first three APIs (@see RTE_DMA_OP_FLAG_SUBMIT) can
+ * do the same work.
+ * @note When enqueuing a set of jobs to the device, having a separate submit
+ * outside a loop makes for clearer code than having a check for the last
+ * iteration inside the loop to set a special submit flag.  However, for cases
+ * where one item alone is to be submitted or there is a small set of jobs to
+ * be submitted sequentially, having a submit flag provides a lower-overhead
+ * way of doing the submission while still keeping the code clean.
+ *
+ * The second part is to obtain the result of requests:
+ *     - rte_dma_completed()
+ *         - return the number of operation requests completed successfully.
+ *     - rte_dma_completed_status()
+ *         - return the number of operation requests completed.
+ *
+ * @note If the dmadev works in silent mode (@see RTE_DMA_CAPA_SILENT),
+ * the application does not invoke the above two completed APIs.
+ *
+ * About the ring_idx which enqueue APIs (e.g. rte_dma_copy(), rte_dma_fill())
+ * return, the rules are as follows:
+ *     - ring_idx for each virtual DMA channel are independent.
+ *     - For a virtual DMA channel, the ring_idx is monotonically incremented;
+ *       when it reaches UINT16_MAX, it wraps back to zero.
+ *     - This ring_idx can be used by applications to track per-operation
+ *       metadata in an application-defined circular ring.
+ *     - The initial ring_idx of a virtual DMA channel is zero; after the
+ *       device is stopped, the ring_idx is reset to zero.
+ *
+ * One example:
+ *     - step-1: start one dmadev
+ *     - step-2: enqueue a copy operation, the ring_idx return is 0
+ *     - step-3: enqueue a copy operation again, the ring_idx return is 1
+ *     - ...
+ *     - step-101: stop the dmadev
+ *     - step-102: start the dmadev
+ *     - step-103: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *     - step-x+0: enqueue a fill operation, the ring_idx return is 65535
+ *     - step-x+1: enqueue a copy operation, the ring_idx return is 0
+ *     - ...
+ *
+ * The DMA operation address used in enqueue APIs (i.e. rte_dma_copy(),
+ * rte_dma_copy_sg(), rte_dma_fill()) is defined as rte_iova_t type.
+ *
+ * The dmadev supports two types of address: memory address and device address.
+ *
+ * - memory address: the source and destination address of the memory-to-memory
+ * transfer type, or the source address of the memory-to-device transfer type,
+ * or the destination address of the device-to-memory transfer type.
+ * @note If the device supports SVA (@see RTE_DMA_CAPA_SVA), the memory
+ * address can be any VA address; otherwise it must be an IOVA address.
+ *
+ * - device address: the source and destination address of the device-to-device
+ * transfer type, or the source address of the device-to-memory transfer type,
+ * or the destination address of the memory-to-device transfer type.
+ *
  * Regarding MT-safety: all dmadev API functions implemented by a PMD are
  * lock-free and assume they are not invoked in parallel on different
  * logical cores to work on the same target dmadev object.
@@ -591,6 +662,386 @@ int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
 __rte_experimental
 int rte_dma_dump(int16_t dev_id, FILE *f);
 
+/**
+ * DMA transfer result status code defines.
+ *
+ * @see rte_dma_completed_status
+ */
+enum rte_dma_status_code {
+	/** The operation completed successfully. */
+	RTE_DMA_STATUS_SUCCESSFUL,
+	/** The operation failed to complete due to an abort by the user.
+	 * This is mainly used when processing dev_stop: the user can modify
+	 * the descriptors (e.g. change one bit to tell hardware to abort the
+	 * job), which allows outstanding requests to complete as much as
+	 * possible and so reduces the time needed to stop the device.
+	 */
+	RTE_DMA_STATUS_USER_ABORT,
+	/** The operation failed to complete due to the following scenario:
+	 * the jobs in a particular batch were not attempted because they
+	 * appeared after a fence where a previous job failed. In some HW
+	 * implementations it is possible for jobs from later batches to be
+	 * completed, though, so the status of the not-attempted jobs is
+	 * reported before that of the newer completed jobs.
+	 */
+	RTE_DMA_STATUS_NOT_ATTEMPTED,
+	/** The operation failed to complete due to an invalid source address. */
+	RTE_DMA_STATUS_INVALID_SRC_ADDR,
+	/** The operation failed to complete due to an invalid destination address. */
+	RTE_DMA_STATUS_INVALID_DST_ADDR,
+	/** The operation failed to complete due to an invalid source or
+	 * destination address. This covers the case where an address error is
+	 * known, but not which address is in error.
+	 */
+	RTE_DMA_STATUS_INVALID_ADDR,
+	/** The operation failed to complete due to an invalid length. */
+	RTE_DMA_STATUS_INVALID_LENGTH,
+	/** The operation failed to complete due to an invalid opcode.
+	 * The DMA descriptor may have multiple formats, which are
+	 * distinguished by the opcode field.
+	 */
+	RTE_DMA_STATUS_INVALID_OPCODE,
+	/** The operation failed to complete due to a bus read error. */
+	RTE_DMA_STATUS_BUS_READ_ERROR,
+	/** The operation failed to complete due to a bus write error. */
+	RTE_DMA_STATUS_BUS_WRITE_ERROR,
+	/** The operation failed to complete due to a bus error. This covers
+	 * the case where a bus error is known, but not its direction.
+	 */
+	RTE_DMA_STATUS_BUS_ERROR,
+	/** The operation failed to complete due to data poisoning. */
+	RTE_DMA_STATUS_DATA_POISION,
+	/** The operation failed to complete due to a descriptor read error. */
+	RTE_DMA_STATUS_DESCRIPTOR_READ_ERROR,
+	/** The operation failed to complete due to a device link error.
+	 * Indicates a link error in the memory-to-device, device-to-memory or
+	 * device-to-device transfer scenarios.
+	 */
+	RTE_DMA_STATUS_DEV_LINK_ERROR,
+	/** The operation failed to complete due to a page fault on lookup. */
+	RTE_DMA_STATUS_PAGE_FAULT,
+	/** The operation failed to complete due to an unknown reason.
+	 * The initial value is 256, which reserves space for future errors.
+	 */
+	RTE_DMA_STATUS_ERROR_UNKNOWN = 0x100,
+};
+
+/**
+ * A structure used to hold scatter-gather DMA operation request entry.
+ *
+ * @see rte_dma_copy_sg
+ */
+struct rte_dma_sge {
+	rte_iova_t addr; /**< The DMA operation address. */
+	uint32_t length; /**< The DMA operation length. */
+};
+
+#include "rte_dmadev_core.h"
+
+/**@{@name DMA operation flag
+ * @see rte_dma_copy()
+ * @see rte_dma_copy_sg()
+ * @see rte_dma_fill()
+ */
+/** Fence flag.
+ * It means the operation with this flag must be processed only after all
+ * previous operations are completed.
+ * If the specified DMA HW works in-order (i.e. it has an implicit fence
+ * between operations), this flag may be a NOP.
+ */
+#define RTE_DMA_OP_FLAG_FENCE   RTE_BIT64(0)
+/** Submit flag.
+ * It means the doorbell must be issued to hardware after this operation is
+ * enqueued.
+ */
+#define RTE_DMA_OP_FLAG_SUBMIT  RTE_BIT64(1)
+/** Write-data-to-low-level-cache hint.
+ * Used for performance optimization; this is just a hint, and there is no
+ * capability bit for it, so a driver should not return an error if this flag
+ * is set.
+ */
+#define RTE_DMA_OP_FLAG_LLC     RTE_BIT64(2)
+/**@}*/
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a copy operation onto the virtual DMA channel.
+ *
+ * This queues up a copy operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   The address of the source buffer.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the data to be copied.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy, -ENOTSUP);
+#endif
+
+	return (*obj->copy)(obj->dev_private, vchan, src, dst, length, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a scatter-gather list copy operation onto the virtual DMA channel.
+ *
+ * This queues up a scatter-gather list copy operation to be performed by
+ * hardware. If the 'flags' parameter contains RTE_DMA_OP_FLAG_SUBMIT, the
+ * doorbell is triggered to begin this operation; otherwise it is not.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param src
+ *   Pointer to the array of source scatter-gather entries.
+ * @param dst
+ *   Pointer to the array of destination scatter-gather entries.
+ * @param nb_src
+ *   The number of source scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param nb_dst
+ *   The number of destination scatter-gather entries.
+ *   @see struct rte_dma_info::max_sges
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
+		struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
+		uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
+	    nb_src == 0 || nb_dst == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->copy_sg, -ENOTSUP);
+#endif
+
+	return (*obj->copy_sg)(obj->dev_private, vchan, src, dst, nb_src,
+			       nb_dst, flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue a fill operation onto the virtual DMA channel.
+ *
+ * This queues up a fill operation to be performed by hardware. If the 'flags'
+ * parameter contains RTE_DMA_OP_FLAG_SUBMIT, the doorbell is triggered to
+ * begin this operation; otherwise the doorbell is not triggered.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param pattern
+ *   The pattern to populate the destination buffer with.
+ * @param dst
+ *   The address of the destination buffer.
+ * @param length
+ *   The length of the destination buffer.
+ * @param flags
+ *   Flags for this operation.
+ *   @see RTE_DMA_OP_FLAG_*
+ *
+ * @return
+ *   - 0..UINT16_MAX: index of enqueued job.
+ *   - -ENOSPC: if no space left to enqueue.
+ *   - other values < 0 on failure.
+ */
+__rte_experimental
+static inline int
+rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
+	     rte_iova_t dst, uint32_t length, uint64_t flags)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || length == 0)
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->fill, -ENOTSUP);
+#endif
+
+	return (*obj->fill)(obj->dev_private, vchan, pattern, dst, length,
+			    flags);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Trigger hardware to begin performing enqueued operations.
+ *
+ * This API is used to write the "doorbell" to the hardware to trigger it
+ * to begin the operations previously enqueued by rte_dma_copy/fill().
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ *
+ * @return
+ *   0 on success. Otherwise negative value is returned.
+ */
+__rte_experimental
+static inline int
+rte_dma_submit(int16_t dev_id, uint16_t vchan)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id))
+		return -EINVAL;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->submit, -ENOTSUP);
+#endif
+
+	return (*obj->submit)(obj->dev_private, vchan);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return the number of operations that have been successfully completed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   The maximum number of completed operations that can be processed.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] has_error
+ *   Indicates if a transfer error has occurred.
+ *   If not required, NULL can be passed in.
+ *
+ * @return
+ *   The number of operations that successfully completed. This return value
+ *   must be less than or equal to the value of nb_cpls.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+	uint16_t idx;
+	bool err;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed, 0);
+#endif
+
+	/* Ensure the pointer values are non-null to simplify drivers.
+	 * In most cases these should be compile time evaluated, since this is
+	 * an inline function.
+	 * - If NULL is explicitly passed as parameter, then compiler knows the
+	 *   value is NULL
+	 * - If address of local variable is passed as parameter, then compiler
+	 *   can know it's non-NULL.
+	 */
+	if (last_idx == NULL)
+		last_idx = &idx;
+	if (has_error == NULL)
+		has_error = &err;
+
+	*has_error = false;
+	return (*obj->completed)(obj->dev_private, vchan, nb_cpls, last_idx,
+				 has_error);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Return the number of operations that have been completed, whether they
+ * succeeded or failed.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param vchan
+ *   The identifier of virtual DMA channel.
+ * @param nb_cpls
+ *   Indicates the size of status array.
+ * @param[out] last_idx
+ *   The last completed operation's ring_idx.
+ *   If not required, NULL can be passed in.
+ * @param[out] status
+ *   This is a pointer to an array of length 'nb_cpls' that holds the completion
+ *   status code of each operation.
+ *   @see enum rte_dma_status_code
+ *
+ * @return
+ *   The number of operations that completed. This return value must be less
+ *   than or equal to the value of nb_cpls.
+ *   If this number is greater than zero (call it n), then the first n values
+ *   in the status array are also set.
+ */
+__rte_experimental
+static inline uint16_t
+rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
+			 const uint16_t nb_cpls, uint16_t *last_idx,
+			 enum rte_dma_status_code *status)
+{
+	struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
+	uint16_t idx;
+
+#ifdef RTE_DMADEV_DEBUG
+	if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
+		return 0;
+	RTE_FUNC_PTR_OR_ERR_RET(*obj->completed_status, 0);
+#endif
+
+	if (last_idx == NULL)
+		last_idx = &idx;
+
+	return (*obj->completed_status)(obj->dev_private, vchan, nb_cpls,
+					last_idx, status);
+}
+
 #ifdef __cplusplus
 }
 #endif
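
A sketch of error handling with the status array; BURST and handle_failed_op()
are hypothetical application-side names:

	enum rte_dma_status_code status[BURST];
	uint16_t last_idx, n, i;

	n = rte_dma_completed_status(dev_id, 0, BURST, &last_idx, status);
	for (i = 0; i < n; i++) {
		/* Recover each entry's ring_idx, with uint16_t wrap-around. */
		uint16_t idx = (uint16_t)(last_idx - (n - 1) + i);

		if (status[i] != RTE_DMA_STATUS_SUCCESSFUL)
			handle_failed_op(idx, status[i]);
	}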
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
new file mode 100644
index 0000000000..a6946052db
--- /dev/null
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#ifndef RTE_DMADEV_CORE_H
+#define RTE_DMADEV_CORE_H
+
+/**
+ * @file
+ *
+ * DMA Device internal header.
+ *
+ * This header contains internal data types which are used by the dataplane
+ * inline functions.
+ *
+ * Applications should not use these types directly.
+ */
+
+/** @internal Used to enqueue a copy operation. */
+typedef int (*rte_dma_copy_t)(void *dev_private, uint16_t vchan,
+			      rte_iova_t src, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to enqueue a scatter-gather list copy operation. */
+typedef int (*rte_dma_copy_sg_t)(void *dev_private, uint16_t vchan,
+				 const struct rte_dma_sge *src,
+				 const struct rte_dma_sge *dst,
+				 uint16_t nb_src, uint16_t nb_dst,
+				 uint64_t flags);
+
+/** @internal Used to enqueue a fill operation. */
+typedef int (*rte_dma_fill_t)(void *dev_private, uint16_t vchan,
+			      uint64_t pattern, rte_iova_t dst,
+			      uint32_t length, uint64_t flags);
+
+/** @internal Used to trigger hardware to begin working. */
+typedef int (*rte_dma_submit_t)(void *dev_private, uint16_t vchan);
+
+/** @internal Used to return the number of successfully completed operations. */
+typedef uint16_t (*rte_dma_completed_t)(void *dev_private,
+				uint16_t vchan, const uint16_t nb_cpls,
+				uint16_t *last_idx, bool *has_error);
+
+/** @internal Used to return the number of completed operations. */
+typedef uint16_t (*rte_dma_completed_status_t)(void *dev_private,
+			uint16_t vchan, const uint16_t nb_cpls,
+			uint16_t *last_idx, enum rte_dma_status_code *status);
+
+/**
+ * @internal
+ * Fast-path dmadev functions and related data are held in a flat array,
+ * one entry per dmadev.
+ *
+ * This structure occupies exactly 128B, which reserves space for future IO
+ * functions.
+ *
+ * The 'dev_private' field is placed in the first cache line to optimize
+ * performance because the PMD mainly depends on this field.
+ */
+struct rte_dma_fp_object {
+	/** PMD-specific private data. The driver should copy
+	 * rte_dma_dev.dev_private to this field during initialization.
+	 */
+	void *dev_private;
+	rte_dma_copy_t             copy;
+	rte_dma_copy_sg_t          copy_sg;
+	rte_dma_fill_t             fill;
+	rte_dma_submit_t           submit;
+	rte_dma_completed_t        completed;
+	rte_dma_completed_status_t completed_status;
+} __rte_aligned(128);
+
+extern struct rte_dma_fp_object *rte_dma_fp_objs;
+
+#endif /* RTE_DMADEV_CORE_H */
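
On an LP64 target the seven members above occupy 7 * 8 = 56 bytes, so the
128-byte alignment both pads the structure and leaves 72 bytes of headroom
for the future IO functions mentioned above. An illustrative compile-time
check of that layout claim (not part of this patch) could read:

	_Static_assert(sizeof(struct rte_dma_fp_object) == 128,
		       "rte_dma_fp_object must occupy exactly 128 bytes");
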
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index 5fcf0f60b8..d6d2161306 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -100,6 +100,8 @@ struct rte_dma_dev {
 	void *dev_private; /**< PMD-specific private data. */
 	/** Device info supplied during device initialization. */
 	struct rte_device *device;
+	/** Fast-path functions and related data. */
+	struct rte_dma_fp_object *fp_obj;
 	/** Functions implemented by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
 	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
diff --git a/lib/dmadev/version.map b/lib/dmadev/version.map
index e925dfcd6d..e17207b212 100644
--- a/lib/dmadev/version.map
+++ b/lib/dmadev/version.map
@@ -2,10 +2,15 @@ EXPERIMENTAL {
 	global:
 
 	rte_dma_close;
+	rte_dma_completed;
+	rte_dma_completed_status;
 	rte_dma_configure;
+	rte_dma_copy;
+	rte_dma_copy_sg;
 	rte_dma_count_avail;
 	rte_dma_dev_max;
 	rte_dma_dump;
+	rte_dma_fill;
 	rte_dma_get_dev_id_by_name;
 	rte_dma_info_get;
 	rte_dma_is_valid;
@@ -13,6 +18,7 @@ EXPERIMENTAL {
 	rte_dma_stats_get;
 	rte_dma_stats_reset;
 	rte_dma_stop;
+	rte_dma_submit;
 	rte_dma_vchan_setup;
 
 	local: *;
@@ -22,6 +28,7 @@ INTERNAL {
 	global:
 
 	rte_dma_devices;
+	rte_dma_fp_objs;
 	rte_dma_pmd_allocate;
 	rte_dma_pmd_release;
 
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v26 4/6] dmadev: add multi-process support
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
                     ` (2 preceding siblings ...)
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 3/6] dmadev: add data " Chengwen Feng
@ 2021-10-13 12:24   ` Chengwen Feng
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
                     ` (2 subsequent siblings)
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-13 12:24 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds multi-process support to dmadev.
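
A minimal sketch of the resulting usage model (the device name and the
error handling are illustrative assumptions): the primary process probes
devices as usual, while a secondary process, after its own rte_eal_init(),
resolves the same device by name:

	/* Secondary process side; "dma_skeleton" is a placeholder name. */
	int id = rte_dma_get_dev_id_by_name("dma_skeleton");
	if (id < 0)
		rte_exit(EXIT_FAILURE, "device not attached in secondary\n");
	/* Fast-path calls such as rte_dma_completed(id, ...) now work,
	 * since the driver filled the shared rte_dma_fp_objs entry during
	 * its secondary-process attach.
	 */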

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Acked-by: Bruce Richardson <bruce.richardson@intel.com>
Acked-by: Morten Brørup <mb@smartsharesystems.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/dmadev/rte_dmadev.c                | 183 ++++++++++++++++++++-----
 lib/dmadev/rte_dmadev_core.h           |   2 +-
 lib/dmadev/rte_dmadev_pmd.h            |  29 +++-
 4 files changed, 169 insertions(+), 46 deletions(-)

diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 2953bc3e97..888f123237 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -82,6 +82,7 @@ New Features
     hardware and software DMA devices.
   * Added generic API which support a number of different DMA
     operations.
+  * Added multi-process support.
 
 * **Updated af_packet ethdev driver.**
 
diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
index 54e15817fa..7099bbb28d 100644
--- a/lib/dmadev/rte_dmadev.c
+++ b/lib/dmadev/rte_dmadev.c
@@ -19,6 +19,13 @@ static int16_t dma_devices_max;
 
 struct rte_dma_fp_object *rte_dma_fp_objs;
 struct rte_dma_dev *rte_dma_devices;
+static struct {
+	/* Holds the dev_max value of the primary process. This field is
+	 * set by the primary process and read by secondary processes.
+	 */
+	int16_t dev_max;
+	struct rte_dma_dev_data data[0];
+} *dma_devices_shared_data;
 
 RTE_LOG_REGISTER_DEFAULT(rte_dma_logtype, INFO);
 #define RTE_DMA_LOG(level, ...) \
@@ -70,11 +77,11 @@ dma_find_free_id(void)
 {
 	int16_t i;
 
-	if (rte_dma_devices == NULL)
+	if (rte_dma_devices == NULL || dma_devices_shared_data == NULL)
 		return -1;
 
 	for (i = 0; i < dma_devices_max; i++) {
-		if (rte_dma_devices[i].state == RTE_DMA_DEV_UNUSED)
+		if (dma_devices_shared_data->data[i].dev_name[0] == '\0')
 			return i;
 	}
 
@@ -91,7 +98,7 @@ dma_find_by_name(const char *name)
 
 	for (i = 0; i < dma_devices_max; i++) {
 		if ((rte_dma_devices[i].state != RTE_DMA_DEV_UNUSED) &&
-		    (!strcmp(name, rte_dma_devices[i].dev_name)))
+		    (!strcmp(name, rte_dma_devices[i].data->dev_name)))
 			return &rte_dma_devices[i];
 	}
 
@@ -147,23 +154,71 @@ dma_dev_data_prepare(void)
 	return 0;
 }
 
+static int
+dma_shared_data_prepare(void)
+{
+	const char *mz_name = "rte_dma_dev_data";
+	const struct rte_memzone *mz;
+	size_t size;
+
+	if (dma_devices_shared_data != NULL)
+		return 0;
+
+	size = sizeof(*dma_devices_shared_data) +
+		sizeof(struct rte_dma_dev_data) * dma_devices_max;
+
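+	/* The primary process creates the named memzone; secondaries only
+	 * look it up, so every process maps the same shared device data.
+	 */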
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		mz = rte_memzone_reserve(mz_name, size, rte_socket_id(), 0);
+	else
+		mz = rte_memzone_lookup(mz_name);
+	if (mz == NULL)
+		return -ENOMEM;
+
+	dma_devices_shared_data = mz->addr;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		memset(dma_devices_shared_data, 0, size);
+		dma_devices_shared_data->dev_max = dma_devices_max;
+	} else {
+		dma_devices_max = dma_devices_shared_data->dev_max;
+	}
+
+	return 0;
+}
+
 static int
 dma_data_prepare(void)
 {
 	int ret;
 
-	if (dma_devices_max == 0)
-		dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
-
-	ret = dma_fp_data_prepare();
-	if (ret)
-		return ret;
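+	/* Ordering differs by process type: a secondary must map the shared
+	 * data first to learn the primary's dev_max before sizing its local
+	 * arrays, while the primary sizes them before publishing.
+	 */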
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (dma_devices_max == 0)
+			dma_devices_max = RTE_DMADEV_DEFAULT_MAX;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+	} else {
+		ret = dma_shared_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_fp_data_prepare();
+		if (ret)
+			return ret;
+		ret = dma_dev_data_prepare();
+		if (ret)
+			return ret;
+	}
 
-	return dma_dev_data_prepare();
+	return 0;
 }
 
 static struct rte_dma_dev *
-dma_allocate(const char *name, int numa_node, size_t private_data_size)
+dma_allocate_primary(const char *name, int numa_node, size_t private_data_size)
 {
 	struct rte_dma_dev *dev;
 	void *dev_private;
@@ -197,12 +252,59 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 	}
 
 	dev = &rte_dma_devices[dev_id];
-	rte_strscpy(dev->dev_name, name, sizeof(dev->dev_name));
-	dev->dev_id = dev_id;
-	dev->numa_node = numa_node;
-	dev->dev_private = dev_private;
-	dev->fp_obj = &rte_dma_fp_objs[dev_id];
-	dma_fp_object_dummy(dev->fp_obj);
+	dev->data = &dma_devices_shared_data->data[dev_id];
+	rte_strscpy(dev->data->dev_name, name, sizeof(dev->data->dev_name));
+	dev->data->dev_id = dev_id;
+	dev->data->numa_node = numa_node;
+	dev->data->dev_private = dev_private;
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_attach_secondary(const char *name)
+{
+	struct rte_dma_dev *dev;
+	int16_t i;
+	int ret;
+
+	ret = dma_data_prepare();
+	if (ret < 0) {
+		RTE_DMA_LOG(ERR, "Cannot initialize dmadevs data");
+		return NULL;
+	}
+
+	for (i = 0; i < dma_devices_max; i++) {
+		if (!strcmp(dma_devices_shared_data->data[i].dev_name, name))
+			break;
+	}
+	if (i == dma_devices_max) {
+		RTE_DMA_LOG(ERR,
+			"Device %s is not driven by the primary process",
+			name);
+		return NULL;
+	}
+
+	dev = &rte_dma_devices[i];
+	dev->data = &dma_devices_shared_data->data[i];
+
+	return dev;
+}
+
+static struct rte_dma_dev *
+dma_allocate(const char *name, int numa_node, size_t private_data_size)
+{
+	struct rte_dma_dev *dev;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev = dma_allocate_primary(name, numa_node, private_data_size);
+	else
+		dev = dma_attach_secondary(name);
+
+	if (dev) {
+		dev->fp_obj = &rte_dma_fp_objs[dev->data->dev_id];
+		dma_fp_object_dummy(dev->fp_obj);
+	}
 
 	return dev;
 }
@@ -210,7 +312,11 @@ dma_allocate(const char *name, int numa_node, size_t private_data_size)
 static void
 dma_release(struct rte_dma_dev *dev)
 {
-	rte_free(dev->dev_private);
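+	/* Only the primary process owns dev_private and the shared data
+	 * slot; a secondary must not free or clear memory it did not
+	 * allocate.
+	 */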
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		rte_free(dev->data->dev_private);
+		memset(dev->data, 0, sizeof(struct rte_dma_dev_data));
+	}
+
 	dma_fp_object_dummy(dev->fp_obj);
 	memset(dev, 0, sizeof(struct rte_dma_dev));
 }
@@ -245,7 +351,7 @@ rte_dma_pmd_release(const char *name)
 		return -EINVAL;
 
 	if (dev->state == RTE_DMA_DEV_READY)
-		return rte_dma_close(dev->dev_id);
+		return rte_dma_close(dev->data->dev_id);
 
 	dma_release(dev);
 	return 0;
@@ -263,7 +369,7 @@ rte_dma_get_dev_id_by_name(const char *name)
 	if (dev == NULL)
 		return -EINVAL;
 
-	return dev->dev_id;
+	return dev->data->dev_id;
 }
 
 bool
@@ -307,9 +413,9 @@ rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
 	if (ret != 0)
 		return ret;
 
-	dev_info->dev_name = dev->dev_name;
+	dev_info->dev_name = dev->data->dev_name;
 	dev_info->numa_node = dev->device->numa_node;
-	dev_info->nb_vchans = dev->dev_conf.nb_vchans;
+	dev_info->nb_vchans = dev->data->dev_conf.nb_vchans;
 
 	return 0;
 }
@@ -324,7 +430,7 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	if (!rte_dma_is_valid(dev_id) || dev_conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -356,7 +462,8 @@ rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
 	ret = (*dev->dev_ops->dev_configure)(dev, dev_conf,
 					     sizeof(struct rte_dma_conf));
 	if (ret == 0)
-		memcpy(&dev->dev_conf, dev_conf, sizeof(struct rte_dma_conf));
+		memcpy(&dev->data->dev_conf, dev_conf,
+		       sizeof(struct rte_dma_conf));
 
 	return ret;
 }
@@ -370,12 +477,12 @@ rte_dma_start(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already started", dev_id);
 		return 0;
 	}
@@ -388,7 +495,7 @@ rte_dma_start(int16_t dev_id)
 		return ret;
 
 mark_started:
-	dev->dev_started = 1;
+	dev->data->dev_started = 1;
 	return 0;
 }
 
@@ -401,7 +508,7 @@ rte_dma_stop(int16_t dev_id)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (dev->dev_started == 0) {
+	if (dev->data->dev_started == 0) {
 		RTE_DMA_LOG(WARNING, "Device %d already stopped", dev_id);
 		return 0;
 	}
@@ -414,7 +521,7 @@ rte_dma_stop(int16_t dev_id)
 		return ret;
 
 mark_stopped:
-	dev->dev_started = 0;
+	dev->data->dev_started = 0;
 	return 0;
 }
 
@@ -428,7 +535,7 @@ rte_dma_close(int16_t dev_id)
 		return -EINVAL;
 
 	/* Device must be stopped before it can be closed */
-	if (dev->dev_started == 1) {
+	if (dev->data->dev_started == 1) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped before closing", dev_id);
 		return -EBUSY;
@@ -454,7 +561,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 	if (!rte_dma_is_valid(dev_id) || conf == NULL)
 		return -EINVAL;
 
-	if (dev->dev_started != 0) {
+	if (dev->data->dev_started != 0) {
 		RTE_DMA_LOG(ERR,
 			"Device %d must be stopped to allow configuration",
 			dev_id);
@@ -466,7 +573,7 @@ rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
 		RTE_DMA_LOG(ERR, "Device %d get device info fail", dev_id);
 		return -EINVAL;
 	}
-	if (dev->dev_conf.nb_vchans == 0) {
+	if (dev->data->dev_conf.nb_vchans == 0) {
 		RTE_DMA_LOG(ERR, "Device %d must be configured first", dev_id);
 		return -EINVAL;
 	}
@@ -540,7 +647,7 @@ rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
 	if (!rte_dma_is_valid(dev_id) || stats == NULL)
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -561,7 +668,7 @@ rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
 	if (!rte_dma_is_valid(dev_id))
 		return -EINVAL;
 
-	if (vchan >= dev->dev_conf.nb_vchans &&
+	if (vchan >= dev->data->dev_conf.nb_vchans &&
 	    vchan != RTE_DMA_ALL_VCHAN) {
 		RTE_DMA_LOG(ERR,
 			"Device %d vchan %u out of range", dev_id, vchan);
@@ -634,14 +741,14 @@ rte_dma_dump(int16_t dev_id, FILE *f)
 	}
 
 	(void)fprintf(f, "DMA Dev %d, '%s' [%s]\n",
-		dev->dev_id,
-		dev->dev_name,
-		dev->dev_started ? "started" : "stopped");
+		dev->data->dev_id,
+		dev->data->dev_name,
+		dev->data->dev_started ? "started" : "stopped");
 	dma_dump_capability(f, dev_info.dev_capa);
 	(void)fprintf(f, "  max_vchans_supported: %u\n", dev_info.max_vchans);
 	(void)fprintf(f, "  nb_vchans_configured: %u\n", dev_info.nb_vchans);
 	(void)fprintf(f, "  silent_mode: %s\n",
-		dev->dev_conf.enable_silent ? "on" : "off");
+		dev->data->dev_conf.enable_silent ? "on" : "off");
 
 	if (dev->dev_ops->dev_dump != NULL)
 		return (*dev->dev_ops->dev_dump)(dev, f);
diff --git a/lib/dmadev/rte_dmadev_core.h b/lib/dmadev/rte_dmadev_core.h
index a6946052db..236d9d38e5 100644
--- a/lib/dmadev/rte_dmadev_core.h
+++ b/lib/dmadev/rte_dmadev_core.h
@@ -60,7 +60,7 @@ typedef uint16_t (*rte_dma_completed_status_t)(void *dev_private,
  */
 struct rte_dma_fp_object {
 	/** PMD-specific private data. The driver should copy
-	 * rte_dma_dev.dev_private to this field during initialization.
+	 * rte_dma_dev.data->dev_private to this field during initialization.
 	 */
 	void *dev_private;
 	rte_dma_copy_t             copy;
diff --git a/lib/dmadev/rte_dmadev_pmd.h b/lib/dmadev/rte_dmadev_pmd.h
index d6d2161306..23b07a4e1c 100644
--- a/lib/dmadev/rte_dmadev_pmd.h
+++ b/lib/dmadev/rte_dmadev_pmd.h
@@ -76,6 +76,27 @@ struct rte_dma_dev_ops {
 
 	rte_dma_dump_t             dev_dump;
 };
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each DMA device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ *
+ * @see struct rte_dma_dev::data
+ */
+struct rte_dma_dev_data {
+	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	int16_t dev_id; /**< Device [external] identifier. */
+	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
+	void *dev_private; /**< PMD-specific private data. */
+	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
+	__extension__
+	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
+	uint64_t reserved[2]; /**< Reserved for future fields */
+} __rte_cache_aligned;
+
 /**
  * Possible states of a DMA device.
  *
@@ -94,20 +115,14 @@ enum rte_dma_dev_state {
  * The generic data structure associated with each DMA device.
  */
 struct rte_dma_dev {
-	char dev_name[RTE_DEV_NAME_MAX_LEN]; /**< Unique identifier name */
-	int16_t dev_id; /**< Device [external] identifier. */
-	int16_t numa_node; /**< Local NUMA memory ID. -1 if unknown. */
-	void *dev_private; /**< PMD-specific private data. */
 	/** Device info supplied during device initialization. */
 	struct rte_device *device;
+	struct rte_dma_dev_data *data; /**< Pointer to shared device data. */
 	/** Fast-path functions and related data. */
 	struct rte_dma_fp_object *fp_obj;
 	/** Functions implemented by PMD. */
 	const struct rte_dma_dev_ops *dev_ops;
-	struct rte_dma_conf dev_conf; /**< DMA device configuration. */
 	enum rte_dma_dev_state state; /**< Flag indicating the device state. */
-	__extension__
-	uint8_t dev_started : 1; /**< Device state: STARTED(1)/STOPPED(0). */
 	uint64_t reserved[2]; /**< Reserved for future fields. */
 } __rte_cache_aligned;
 
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v26 5/6] dma/skeleton: introduce skeleton dmadev driver
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
                     ` (3 preceding siblings ...)
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 4/6] dmadev: add multi-process support Chengwen Feng
@ 2021-10-13 12:24   ` Chengwen Feng
  2021-10-13 12:25   ` [dpdk-dev] [PATCH v26 6/6] app/test: add dmadev API test Chengwen Feng
  2021-10-17 19:17   ` [dpdk-dev] [PATCH v26 0/6] support dmadev Thomas Monjalon
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-13 12:24 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

The skeleton dmadev driver, modelled on the rawdev skeleton, showcases
the dmadev library.

The skeleton is designed as a virtual device which is plugged into the
vdev bus on initialization.

Also enable compilation of the dmadev skeleton driver.
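
As a usage sketch (the argument value is illustrative; "lcore" is the vdev
argument parsed by the driver below), the device can be created from the
EAL command line with --vdev=dma_skeleton,lcore=3, or programmatically:

	/* Pin the cpucopy thread to lcore 3; value is illustrative. */
	if (rte_vdev_init("dma_skeleton", "lcore=3") != 0)
		rte_exit(EXIT_FAILURE, "cannot create skeleton dmadev\n");
	int id = rte_dma_get_dev_id_by_name("dma_skeleton");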

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                            |   1 +
 drivers/dma/meson.build                |   4 +-
 drivers/dma/skeleton/meson.build       |   7 +
 drivers/dma/skeleton/skeleton_dmadev.c | 571 +++++++++++++++++++++++++
 drivers/dma/skeleton/skeleton_dmadev.h |  61 +++
 drivers/dma/skeleton/version.map       |   3 +
 6 files changed, 646 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma/skeleton/meson.build
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
 create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
 create mode 100644 drivers/dma/skeleton/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 119cfaa04e..ec887ac49f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -457,6 +457,7 @@ F: doc/guides/regexdevs/features/default.ini
 DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
+F: drivers/dma/skeleton/
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/drivers/dma/meson.build b/drivers/dma/meson.build
index a24c56d8ff..d9c7ede32f 100644
--- a/drivers/dma/meson.build
+++ b/drivers/dma/meson.build
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright 2021 HiSilicon Limited
 
-drivers = []
+drivers = [
+        'skeleton',
+]
diff --git a/drivers/dma/skeleton/meson.build b/drivers/dma/skeleton/meson.build
new file mode 100644
index 0000000000..8871b80956
--- /dev/null
+++ b/drivers/dma/skeleton/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2021 HiSilicon Limited
+
+deps += ['dmadev', 'kvargs', 'ring', 'bus_vdev']
+sources = files(
+        'skeleton_dmadev.c',
+)
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000000..22a73c6178
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
+RTE_LOG_REGISTER_DEFAULT(skeldma_logtype, INFO);
+#define SKELDMA_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, skeldma_logtype, "%s(): " fmt "\n", \
+		__func__, ##args)
+
+/* Count of instances, currently only 1 is supported. */
+static uint16_t skeldma_count;
+
+static int
+skeldma_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info,
+		 uint32_t info_sz)
+{
+#define SKELDMA_MAX_DESC	8192
+#define SKELDMA_MIN_DESC	32
+
+	RTE_SET_USED(dev);
+	RTE_SET_USED(info_sz);
+
+	dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM |
+			     RTE_DMA_CAPA_SVA |
+			     RTE_DMA_CAPA_OPS_COPY;
+	dev_info->max_vchans = 1;
+	dev_info->max_desc = SKELDMA_MAX_DESC;
+	dev_info->min_desc = SKELDMA_MIN_DESC;
+
+	return 0;
+}
+
+static int
+skeldma_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
+		  uint32_t conf_sz)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(conf);
+	RTE_SET_USED(conf_sz);
+	return 0;
+}
+
+static void *
+cpucopy_thread(void *param)
+{
+#define SLEEP_THRESHOLD		10000
+#define SLEEP_US_VAL		10
+
+	struct rte_dma_dev *dev = param;
+	struct skeldma_hw *hw = dev->data->dev_private;
+	struct skeldma_desc *desc = NULL;
+	int ret;
+
+	while (!hw->exit_flag) {
+		ret = rte_ring_dequeue(hw->desc_running, (void **)&desc);
+		if (ret) {
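+			/* Empty poll: back off to sleeping once
+			 * SLEEP_THRESHOLD consecutive polls find no work;
+			 * the == 0 test keeps the counter saturated if it
+			 * wraps around.
+			 */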
+			hw->zero_req_count++;
+			if (hw->zero_req_count == 0)
+				hw->zero_req_count = SLEEP_THRESHOLD;
+			if (hw->zero_req_count >= SLEEP_THRESHOLD)
+				rte_delay_us_sleep(SLEEP_US_VAL);
+			continue;
+		}
+
+		hw->zero_req_count = 0;
+		rte_memcpy(desc->dst, desc->src, desc->len);
+		hw->completed_count++;
+		(void)rte_ring_enqueue(hw->desc_completed, (void *)desc);
+	}
+
+	return NULL;
+}
+
+static void
+fflush_ring(struct skeldma_hw *hw, struct rte_ring *ring)
+{
+	struct skeldma_desc *desc = NULL;
+	while (rte_ring_count(ring) > 0) {
+		(void)rte_ring_dequeue(ring, (void **)&desc);
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+}
+
+static int
+skeldma_start(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+	rte_cpuset_t cpuset;
+	int ret;
+
+	if (hw->desc_mem == NULL) {
+		SKELDMA_LOG(ERR, "Vchan was not set up, start failed!");
+		return -EINVAL;
+	}
+
+	/* Reset the dmadev to a known state, including:
+	 * 1) flush the pending/running/completed rings back to the empty ring.
+	 * 2) init the ring idx to zero.
+	 * 3) init the running statistics.
+	 * 4) mark the cpucopy task exit_flag as false.
+	 */
+	fflush_ring(hw, hw->desc_pending);
+	fflush_ring(hw, hw->desc_running);
+	fflush_ring(hw, hw->desc_completed);
+	hw->ridx = 0;
+	hw->submitted_count = 0;
+	hw->zero_req_count = 0;
+	hw->completed_count = 0;
+	hw->exit_flag = false;
+
+	rte_mb();
+
+	ret = rte_ctrl_thread_create(&hw->thread, "dma_skeleton", NULL,
+				     cpucopy_thread, dev);
+	if (ret) {
+		SKELDMA_LOG(ERR, "Failed to start cpucopy thread!");
+		return -EINVAL;
+	}
+
+	if (hw->lcore_id != -1) {
+		cpuset = rte_lcore_cpuset(hw->lcore_id);
+		ret = pthread_setaffinity_np(hw->thread, sizeof(cpuset),
+					     &cpuset);
+		if (ret)
+			SKELDMA_LOG(WARNING,
+				"Failed to set thread affinity to lcore %d!",
+				hw->lcore_id);
+	}
+
+	return 0;
+}
+
+static int
+skeldma_stop(struct rte_dma_dev *dev)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	hw->exit_flag = true;
+	rte_delay_ms(1);
+
+	pthread_cancel(hw->thread);
+	pthread_join(hw->thread, NULL);
+
+	return 0;
+}
+
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skelteon_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Failed to allocate dma skeleton descriptors!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Failed to create dma skeleton desc rings!");
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a full ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	hw->desc_mem = NULL;
+	rte_ring_free(hw->desc_empty);
+	hw->desc_empty = NULL;
+	rte_ring_free(hw->desc_pending);
+	hw->desc_pending = NULL;
+	rte_ring_free(hw->desc_running);
+	hw->desc_running = NULL;
+	rte_ring_free(hw->desc_completed);
+	hw->desc_completed = NULL;
+}
+
+static int
+skeldma_close(struct rte_dma_dev *dev)
+{
+	/* The device is already stopped at this point. */
+	vchan_release(dev->data->dev_private);
+	return 0;
+}
+
+static int
+skeldma_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
+		    const struct rte_dma_vchan_conf *conf,
+		    uint32_t conf_sz)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(conf_sz);
+
+	if (!rte_is_power_of_2(conf->nb_desc)) {
+		SKELDMA_LOG(ERR, "Number of descriptors must be a power of 2!");
+		return -EINVAL;
+	}
+
+	vchan_release(hw);
+	return vchan_setup(hw, conf->nb_desc);
+}
+
+static int
+skeldma_stats_get(const struct rte_dma_dev *dev, uint16_t vchan,
+		  struct rte_dma_stats *stats, uint32_t stats_sz)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(stats_sz);
+
+	stats->submitted = hw->submitted_count;
+	stats->completed = hw->completed_count;
+	stats->errors = 0;
+
+	return 0;
+}
+
+static int
+skeldma_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	RTE_SET_USED(vchan);
+
+	hw->submitted_count = 0;
+	hw->completed_count = 0;
+
+	return 0;
+}
+
+static int
+skeldma_dump(const struct rte_dma_dev *dev, FILE *f)
+{
+#define GET_RING_COUNT(ring)	((ring) ? (rte_ring_count(ring)) : 0)
+
+	struct skeldma_hw *hw = dev->data->dev_private;
+
+	(void)fprintf(f,
+		"    lcore_id: %d\n"
+		"    socket_id: %d\n"
+		"    desc_empty_ring_count: %u\n"
+		"    desc_pending_ring_count: %u\n"
+		"    desc_running_ring_count: %u\n"
+		"    desc_completed_ring_count: %u\n",
+		hw->lcore_id, hw->socket_id,
+		GET_RING_COUNT(hw->desc_empty),
+		GET_RING_COUNT(hw->desc_pending),
+		GET_RING_COUNT(hw->desc_running),
+		GET_RING_COUNT(hw->desc_completed));
+	(void)fprintf(f,
+		"    next_ring_idx: %u\n"
+		"    submitted_count: %" PRIu64 "\n"
+		"    completed_count: %" PRIu64 "\n",
+		hw->ridx, hw->submitted_count, hw->completed_count);
+
+	return 0;
+}
+
+static inline void
+submit(struct skeldma_hw *hw, struct skeldma_desc *desc)
+{
+	uint16_t count = rte_ring_count(hw->desc_pending);
+	struct skeldma_desc *pend_desc = NULL;
+
+	while (count > 0) {
+		(void)rte_ring_dequeue(hw->desc_pending, (void **)&pend_desc);
+		(void)rte_ring_enqueue(hw->desc_running, (void *)pend_desc);
+		count--;
+	}
+
+	if (desc)
+		(void)rte_ring_enqueue(hw->desc_running, (void *)desc);
+}
+
+static int
+skeldma_copy(void *dev_private, uint16_t vchan,
+	     rte_iova_t src, rte_iova_t dst,
+	     uint32_t length, uint64_t flags)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc;
+	int ret;
+
+	RTE_SET_USED(vchan);
+
+	ret = rte_ring_dequeue(hw->desc_empty, (void **)&desc);
+	if (ret)
+		return -ENOSPC;
+	desc->src = (void *)(uintptr_t)src;
+	desc->dst = (void *)(uintptr_t)dst;
+	desc->len = length;
+	desc->ridx = hw->ridx;
+	if (flags & RTE_DMA_OP_FLAG_SUBMIT)
+		submit(hw, desc);
+	else
+		(void)rte_ring_enqueue(hw->desc_pending, (void *)desc);
+	hw->submitted_count++;
+
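+	/* Return this operation's ring index and advance it; completions
+	 * hand the same index back via desc->ridx.
+	 */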
+	return hw->ridx++;
+}
+
+static int
+skeldma_submit(void *dev_private, uint16_t vchan)
+{
+	struct skeldma_hw *hw = dev_private;
+	RTE_SET_USED(vchan);
+	submit(hw, NULL);
+	return 0;
+}
+
+static uint16_t
+skeldma_completed(void *dev_private,
+		  uint16_t vchan, const uint16_t nb_cpls,
+		  uint16_t *last_idx, bool *has_error)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+	RTE_SET_USED(has_error);
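+	/* The skeleton never fails a copy, so has_error is left untouched;
+	 * the library wrapper has already pre-cleared it to false.
+	 */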
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		index++;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static uint16_t
+skeldma_completed_status(void *dev_private,
+			 uint16_t vchan, const uint16_t nb_cpls,
+			 uint16_t *last_idx, enum rte_dma_status_code *status)
+{
+	struct skeldma_hw *hw = dev_private;
+	struct skeldma_desc *desc = NULL;
+	uint16_t index = 0;
+	uint16_t count;
+
+	RTE_SET_USED(vchan);
+
+	count = RTE_MIN(nb_cpls, rte_ring_count(hw->desc_completed));
+	while (index < count) {
+		(void)rte_ring_dequeue(hw->desc_completed, (void **)&desc);
+		if (index == count - 1)
+			*last_idx = desc->ridx;
+		status[index++] = RTE_DMA_STATUS_SUCCESSFUL;
+		(void)rte_ring_enqueue(hw->desc_empty, (void *)desc);
+	}
+
+	return count;
+}
+
+static const struct rte_dma_dev_ops skeldma_ops = {
+	.dev_info_get     = skeldma_info_get,
+	.dev_configure    = skeldma_configure,
+	.dev_start        = skeldma_start,
+	.dev_stop         = skeldma_stop,
+	.dev_close        = skeldma_close,
+
+	.vchan_setup      = skeldma_vchan_setup,
+
+	.stats_get        = skeldma_stats_get,
+	.stats_reset      = skeldma_stats_reset,
+
+	.dev_dump         = skeldma_dump,
+};
+
+static int
+skeldma_create(const char *name, struct rte_vdev_device *vdev, int lcore_id)
+{
+	struct rte_dma_dev *dev;
+	struct skeldma_hw *hw;
+	int socket_id;
+
+	socket_id = (lcore_id < 0) ? rte_socket_id() :
+				     rte_lcore_to_socket_id(lcore_id);
+	dev = rte_dma_pmd_allocate(name, socket_id, sizeof(struct skeldma_hw));
+	if (dev == NULL) {
+		SKELDMA_LOG(ERR, "Unable to allocate dmadev: %s", name);
+		return -EINVAL;
+	}
+
+	dev->device = &vdev->device;
+	dev->dev_ops = &skeldma_ops;
+	dev->fp_obj->dev_private = dev->data->dev_private;
+	dev->fp_obj->copy = skeldma_copy;
+	dev->fp_obj->submit = skeldma_submit;
+	dev->fp_obj->completed = skeldma_completed;
+	dev->fp_obj->completed_status = skeldma_completed_status;
+
+	hw = dev->data->dev_private;
+	hw->lcore_id = lcore_id;
+	hw->socket_id = socket_id;
+
+	dev->state = RTE_DMA_DEV_READY;
+
+	return dev->data->dev_id;
+}
+
+static int
+skeldma_destroy(const char *name)
+{
+	return rte_dma_pmd_release(name);
+}
+
+static int
+skeldma_parse_lcore(const char *key __rte_unused,
+		    const char *value,
+		    void *opaque)
+{
+	int lcore_id = atoi(value);
+	if (lcore_id >= 0 && lcore_id < RTE_MAX_LCORE)
+		*(int *)opaque = lcore_id;
+	return 0;
+}
+
+static void
+skeldma_parse_vdev_args(struct rte_vdev_device *vdev, int *lcore_id)
+{
+	static const char *const args[] = {
+		SKELDMA_ARG_LCORE,
+		NULL
+	};
+
+	struct rte_kvargs *kvlist;
+	const char *params;
+
+	params = rte_vdev_device_args(vdev);
+	if (params == NULL || params[0] == '\0')
+		return;
+
+	kvlist = rte_kvargs_parse(params, args);
+	if (!kvlist)
+		return;
+
+	(void)rte_kvargs_process(kvlist, SKELDMA_ARG_LCORE,
+				 skeldma_parse_lcore, lcore_id);
+	SKELDMA_LOG(INFO, "Parse lcore_id = %d", *lcore_id);
+
+	rte_kvargs_free(kvlist);
+}
+
+static int
+skeldma_probe(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int lcore_id = -1;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+		SKELDMA_LOG(ERR, "Multiple process not supported for %s", name);
+		return -EINVAL;
+	}
+
+	/* More than one instance is not supported */
+	if (skeldma_count > 0) {
+		SKELDMA_LOG(ERR, "Multiple instance not supported for %s",
+			name);
+		return -EINVAL;
+	}
+
+	skeldma_parse_vdev_args(vdev, &lcore_id);
+
+	ret = skeldma_create(name, vdev, lcore_id);
+	if (ret >= 0) {
+		SKELDMA_LOG(INFO, "Create %s dmadev with lcore-id %d",
+			name, lcore_id);
+		skeldma_count = 1;
+	}
+
+	return ret < 0 ? ret : 0;
+}
+
+static int
+skeldma_remove(struct rte_vdev_device *vdev)
+{
+	const char *name;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -1;
+
+	ret = skeldma_destroy(name);
+	if (!ret) {
+		skeldma_count = 0;
+		SKELDMA_LOG(INFO, "Remove %s dmadev", name);
+	}
+
+	return ret;
+}
+
+static struct rte_vdev_driver skeldma_pmd_drv = {
+	.probe = skeldma_probe,
+	.remove = skeldma_remove,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
+};
+
+RTE_PMD_REGISTER_VDEV(dma_skeleton, skeldma_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(dma_skeleton,
+		SKELDMA_ARG_LCORE "=<uint16> ");
diff --git a/drivers/dma/skeleton/skeleton_dmadev.h b/drivers/dma/skeleton/skeleton_dmadev.h
new file mode 100644
index 0000000000..eaa52364bf
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#ifndef SKELETON_DMADEV_H
+#define SKELETON_DMADEV_H
+
+#include <pthread.h>
+
+#include <rte_ring.h>
+
+#define SKELDMA_ARG_LCORE	"lcore"
+
+struct skeldma_desc {
+	void *src;
+	void *dst;
+	uint32_t len;
+	uint16_t ridx; /* ring idx */
+};
+
+struct skeldma_hw {
+	int lcore_id; /* cpucopy task affinity core */
+	int socket_id;
+	pthread_t thread; /* cpucopy task thread */
+	volatile int exit_flag; /* cpucopy task exit flag */
+
+	struct skeldma_desc *desc_mem;
+
+	/* Descriptor ring state machine:
+	 *
+	 *  -----------     enqueue without submit     -----------
+	 *  |  empty  |------------------------------->| pending |
+	 *  -----------\                               -----------
+	 *       ^      \------------                       |
+	 *       |                  |                       |submit doorbell
+	 *       |                  |                       |
+	 *       |                  |enqueue with submit    |
+	 *       |get completed     |------------------|    |
+	 *       |                                     |    |
+	 *       |                                     v    v
+	 *  -----------     cpucopy thread working     -----------
+	 *  |completed|<-------------------------------| running |
+	 *  -----------                                -----------
+	 */
+	struct rte_ring *desc_empty;
+	struct rte_ring *desc_pending;
+	struct rte_ring *desc_running;
+	struct rte_ring *desc_completed;
+
+	/* Cache delimiter for dataplane API's operation data */
+	char cache1 __rte_cache_aligned;
+	uint16_t ridx;  /* ring idx */
+	uint64_t submitted_count;
+
+	/* Cache delimiter for cpucopy thread's operation data */
+	char cache2 __rte_cache_aligned;
+	uint32_t zero_req_count;
+	uint64_t completed_count;
+};
+
+#endif /* SKELETON_DMADEV_H */
diff --git a/drivers/dma/skeleton/version.map b/drivers/dma/skeleton/version.map
new file mode 100644
index 0000000000..c2e0723b4c
--- /dev/null
+++ b/drivers/dma/skeleton/version.map
@@ -0,0 +1,3 @@
+DPDK_22 {
+	local: *;
+};
-- 
2.33.0


^ permalink raw reply	[flat|nested] 339+ messages in thread

* [dpdk-dev] [PATCH v26 6/6] app/test: add dmadev API test
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
                     ` (4 preceding siblings ...)
  2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
@ 2021-10-13 12:25   ` Chengwen Feng
  2021-10-17 19:17   ` [dpdk-dev] [PATCH v26 0/6] support dmadev Thomas Monjalon
  6 siblings, 0 replies; 339+ messages in thread
From: Chengwen Feng @ 2021-10-13 12:25 UTC (permalink / raw)
  To: thomas, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko
  Cc: dev, mb, nipun.gupta, hemant.agrawal, maxime.coquelin,
	honnappa.nagarahalli, david.marchand, sburla, pkapoor,
	konstantin.ananyev, conor.walsh, kevin.laatz

This patch adds a dmadev API test based on the 'dma_skeleton' vdev. The
test cases can be executed using the 'dmadev_autotest' command in the
test framework.
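
For example (the binary path is illustrative), the suite can be run
non-interactively with:

	echo dmadev_autotest | ./build/app/test/dpdk-test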

Signed-off-by: Chengwen Feng <fengchengwen@huawei.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Reviewed-by: Kevin Laatz <kevin.laatz@intel.com>
Reviewed-by: Conor Walsh <conor.walsh@intel.com>
---
 MAINTAINERS                |   1 +
 app/test/meson.build       |   4 +
 app/test/test_dmadev.c     |  41 +++
 app/test/test_dmadev_api.c | 574 +++++++++++++++++++++++++++++++++++++
 app/test/test_dmadev_api.h |   5 +
 5 files changed, 625 insertions(+)
 create mode 100644 app/test/test_dmadev.c
 create mode 100644 app/test/test_dmadev_api.c
 create mode 100644 app/test/test_dmadev_api.h

diff --git a/MAINTAINERS b/MAINTAINERS
index ec887ac49f..d329873465 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -458,6 +458,7 @@ DMA device API - EXPERIMENTAL
 M: Chengwen Feng <fengchengwen@huawei.com>
 F: lib/dmadev/
 F: drivers/dma/skeleton/
+F: app/test/test_dmadev*
 F: doc/guides/prog_guide/dmadev.rst
 
 Eventdev API
diff --git a/app/test/meson.build b/app/test/meson.build
index f144d8b8ed..a16374b7a1 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -44,6 +44,8 @@ test_sources = files(
         'test_debug.c',
         'test_distributor.c',
         'test_distributor_perf.c',
+        'test_dmadev.c',
+        'test_dmadev_api.c',
         'test_eal_flags.c',
         'test_eal_fs.c',
         'test_efd.c',
@@ -163,6 +165,7 @@ test_deps = [
         'cmdline',
         'cryptodev',
         'distributor',
+        'dmadev',
         'efd',
         'ethdev',
         'eventdev',
@@ -334,6 +337,7 @@ driver_test_names = [
         'cryptodev_sw_mvsam_autotest',
         'cryptodev_sw_snow3g_autotest',
         'cryptodev_sw_zuc_autotest',
+        'dmadev_autotest',
         'eventdev_selftest_octeontx',
         'eventdev_selftest_sw',
         'rawdev_autotest',
diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
new file mode 100644
index 0000000000..45da6b76fe
--- /dev/null
+++ b/app/test/test_dmadev.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include <rte_dmadev.h>
+#include <rte_bus_vdev.h>
+
+#include "test.h"
+#include "test_dmadev_api.h"
+
+static int
+test_apis(void)
+{
+	const char *pmd = "dma_skeleton";
+	int id;
+	int ret;
+
+	if (rte_vdev_init(pmd, NULL) < 0)
+		return TEST_SKIPPED;
+	id = rte_dma_get_dev_id_by_name(pmd);
+	if (id < 0)
+		return TEST_SKIPPED;
+	printf("\n### Test dmadev infrastructure using skeleton driver\n");
+	ret = test_dma_api(id);
+	rte_vdev_uninit(pmd);
+
+	return ret;
+}
+
+static int
+test_dma(void)
+{
+	/* basic sanity on dmadev infrastructure */
+	if (test_apis() < 0)
+		return -1;
+
+	return 0;
+}
+
+REGISTER_TEST_COMMAND(dmadev_autotest, test_dma);
diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
new file mode 100644
index 0000000000..4a181af90a
--- /dev/null
+++ b/app/test/test_dmadev_api.c
@@ -0,0 +1,574 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+#include <string.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_test.h>
+#include <rte_dmadev.h>
+
+extern int test_dma_api(uint16_t dev_id);
+
+#define DMA_TEST_API_RUN(test) \
+	testsuite_run_test(test, #test)
+
+#define TEST_MEMCPY_SIZE	1024
+#define TEST_WAIT_US_VAL	50000
+
+#define TEST_SUCCESS 0
+#define TEST_FAILED  -1
+
+static int16_t test_dev_id;
+static int16_t invalid_dev_id;
+
+static char *src;
+static char *dst;
+
+static int total;
+static int passed;
+static int failed;
+
+static int
+testsuite_setup(int16_t dev_id)
+{
+	test_dev_id = dev_id;
+	invalid_dev_id = -1;
+
+	src = rte_malloc("dmadev_test_src", TEST_MEMCPY_SIZE, 0);
+	if (src == NULL)
+		return -ENOMEM;
+	dst = rte_malloc("dmadev_test_dst", TEST_MEMCPY_SIZE, 0);
+	if (dst == NULL) {
+		rte_free(src);
+		src = NULL;
+		return -ENOMEM;
+	}
+
+	total = 0;
+	passed = 0;
+	failed = 0;
+
+	/* Set dmadev log level to critical to suppress unnecessary output
+	 * during API tests.
+	 */
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_CRIT);
+
+	return 0;
+}
+
+static void
+testsuite_teardown(void)
+{
+	rte_free(src);
+	src = NULL;
+	rte_free(dst);
+	dst = NULL;
+	/* Ensure the dmadev is stopped. */
+	rte_dma_stop(test_dev_id);
+
+	rte_log_set_level_pattern("lib.dmadev", RTE_LOG_INFO);
+}
+
+static void
+testsuite_run_test(int (*test)(void), const char *name)
+{
+	int ret = 0;
+
+	if (test) {
+		ret = test();
+		if (ret < 0) {
+			failed++;
+			printf("%s Failed\n", name);
+		} else {
+			passed++;
+			printf("%s Passed\n", name);
+		}
+	}
+
+	total++;
+}
+
+static int
+test_dma_get_dev_id_by_name(void)
+{
+	int ret = rte_dma_get_dev_id_by_name("invalid_dmadev_device");
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_is_valid_dev(void)
+{
+	int ret;
+	ret = rte_dma_is_valid(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == false, "Expected false for invalid dev id");
+	ret = rte_dma_is_valid(test_dev_id);
+	RTE_TEST_ASSERT(ret == true, "Expected true for valid dev id");
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_count(void)
+{
+	uint16_t count = rte_dma_count_avail();
+	RTE_TEST_ASSERT(count > 0, "Invalid dmadev count %u", count);
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_info_get(void)
+{
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(invalid_dev_id, &info);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_configure(void)
+{
+	struct rte_dma_conf conf = { 0 };
+	struct rte_dma_info info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_configure(invalid_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_configure(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for nb_vchans == 0 */
+	memset(&conf, 0, sizeof(conf));
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for conf.nb_vchans > info.max_vchans */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans + 1;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check enable silent mode */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	conf.enable_silent = true;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Configure success */
+	memset(&conf, 0, sizeof(conf));
+	conf.nb_vchans = info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check configure success */
+	ret = rte_dma_info_get(test_dev_id, &info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	RTE_TEST_ASSERT_EQUAL(conf.nb_vchans, info.nb_vchans,
+			      "Configure nb_vchans not match");
+
+	return TEST_SUCCESS;
+}
+
+static int
+check_direction(void)
+{
+	struct rte_dma_vchan_conf vchan_conf;
+	int ret;
+
+	/* Check for direction */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction and dev_capa combination */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_MEM;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_DEV_TO_DEV;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return 0;
+}
+
+static int
+check_port_type(struct rte_dma_info *dev_info)
+{
+	struct rte_dma_vchan_conf vchan_conf;
+	int ret;
+
+	/* Check src port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info->min_desc;
+	vchan_conf.src_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check dst port type validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info->min_desc;
+	vchan_conf.dst_port.port_type = RTE_DMA_PORT_PCIE;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return 0;
+}
+
+static int
+test_dma_vchan_setup(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_vchan_setup(invalid_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Make sure configure success */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info");
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure dmadev, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_vchan_setup(test_dev_id, dev_conf.nb_vchans, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for direction */
+	ret = check_direction();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check direction");
+
+	/* Check for nb_desc validation */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc - 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	vchan_conf.nb_desc = dev_info.max_desc + 1;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check port type */
+	ret = check_port_type(&dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to check port type");
+
+	/* Check vchan setup success */
+	memset(&vchan_conf, 0, sizeof(vchan_conf));
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+setup_one_vchan(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	dev_conf.nb_vchans = dev_info.max_vchans;
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
+	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	vchan_conf.nb_desc = dev_info.min_desc;
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup vchan, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_start_stop(void)
+{
+	struct rte_dma_vchan_conf vchan_conf = { 0 };
+	struct rte_dma_conf dev_conf = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_start(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stop(invalid_dev_id);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check reconfigure and vchan setup when device started */
+	ret = rte_dma_configure(test_dev_id, &dev_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to configure, %d", ret);
+	ret = rte_dma_vchan_setup(test_dev_id, 0, &vchan_conf);
+	RTE_TEST_ASSERT(ret == -EBUSY, "Failed to setup vchan, %d", ret);
+
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_stats(void)
+{
+	struct rte_dma_info dev_info = { 0 };
+	struct rte_dma_stats stats = { 0 };
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_stats_get(invalid_dev_id, 0, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_get(invalid_dev_id, 0, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(invalid_dev_id, 0);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	/* Check for invalid vchan */
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, dev_info.max_vchans, &stats);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, dev_info.max_vchans);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	/* Check for valid vchan */
+	ret = rte_dma_stats_get(test_dev_id, 0, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get stats, %d", ret);
+	ret = rte_dma_stats_get(test_dev_id, RTE_DMA_ALL_VCHAN, &stats);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to get all stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset stats, %d", ret);
+	ret = rte_dma_stats_reset(test_dev_id, RTE_DMA_ALL_VCHAN);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to reset all stats, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_dump(void)
+{
+	int ret;
+
+	/* Check for invalid parameters */
+	ret = rte_dma_dump(invalid_dev_id, stderr);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+	ret = rte_dma_dump(test_dev_id, NULL);
+	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static void
+setup_memory(void)
+{
+	int i;
+
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++)
+		src[i] = (char)i;
+	memset(dst, 0, TEST_MEMCPY_SIZE);
+}
+
+static int
+verify_memory(void)
+{
+	int i;
+
+	for (i = 0; i < TEST_MEMCPY_SIZE; i++) {
+		if (src[i] == dst[i])
+			continue;
+		RTE_TEST_ASSERT_EQUAL(src[i], dst[i],
+			"Failed to copy memory, %d %d", src[i], dst[i]);
+	}
+
+	return 0;
+}
+
+static int
+test_dma_completed(void)
+{
+	uint16_t last_idx = 1;
+	bool has_error = true;
+	uint16_t cpl_ret;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	setup_memory();
+
+	/* Check enqueue without submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, 0);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Failed to get completed");
+
+	/* Check add submit */
+	ret = rte_dma_submit(test_dev_id, 0);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to submit, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	ret = verify_memory();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	setup_memory();
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed(test_dev_id, 0, 1, &last_idx, &has_error);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	RTE_TEST_ASSERT_EQUAL(has_error, false, "Should have no error");
+	ret = verify_memory();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	/* Stop the dmadev to return it to a known state. */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+static int
+test_dma_completed_status(void)
+{
+	enum rte_dma_status_code status[1] = { 1 };
+	uint16_t last_idx = 1;
+	uint16_t cpl_ret, i;
+	int ret;
+
+	/* Setup one vchan for later test */
+	ret = setup_one_vchan();
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	/* Check for enqueue with submit */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 0, "Last idx should be zero, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Check completed status again: no new completions expected */
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 0, "Expected no further completions");
+
+	/* Check for enqueue with submit again */
+	ret = rte_dma_copy(test_dev_id, 0, (rte_iova_t)src, (rte_iova_t)dst,
+			   TEST_MEMCPY_SIZE, RTE_DMA_OP_FLAG_SUBMIT);
+	RTE_TEST_ASSERT_EQUAL(ret, 1, "Failed to enqueue copy, %d", ret);
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+	cpl_ret = rte_dma_completed_status(test_dev_id, 0, 1, &last_idx,
+					   status);
+	RTE_TEST_ASSERT_EQUAL(cpl_ret, 1, "Failed to get completed status");
+	RTE_TEST_ASSERT_EQUAL(last_idx, 1, "Last idx should be 1, %u",
+				last_idx);
+	for (i = 0; i < RTE_DIM(status); i++)
+		RTE_TEST_ASSERT_EQUAL(status[i], 0,
+				"Failed to completed status, %d", status[i]);
+
+	/* Stop the dmadev to return it to a known state */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
+int
+test_dma_api(uint16_t dev_id)
+{
+	int ret = testsuite_setup(dev_id);
+	if (ret) {
+		printf("testsuite setup fail!\n");
+		return -1;
+	}
+
+	/* Each testcase that exits successfully must ensure that the test
+	 * dmadev still exists and is left in the stopped state.
+	 */
+	DMA_TEST_API_RUN(test_dma_get_dev_id_by_name);
+	DMA_TEST_API_RUN(test_dma_is_valid_dev);
+	DMA_TEST_API_RUN(test_dma_count);
+	DMA_TEST_API_RUN(test_dma_info_get);
+	DMA_TEST_API_RUN(test_dma_configure);
+	DMA_TEST_API_RUN(test_dma_vchan_setup);
+	DMA_TEST_API_RUN(test_dma_start_stop);
+	DMA_TEST_API_RUN(test_dma_stats);
+	DMA_TEST_API_RUN(test_dma_dump);
+	DMA_TEST_API_RUN(test_dma_completed);
+	DMA_TEST_API_RUN(test_dma_completed_status);
+
+	testsuite_teardown();
+
+	printf("Total tests   : %d\n", total);
+	printf("Passed        : %d\n", passed);
+	printf("Failed        : %d\n", failed);
+
+	if (failed)
+		return -1;
+
+	return 0;
+}
diff --git a/app/test/test_dmadev_api.h b/app/test/test_dmadev_api.h
new file mode 100644
index 0000000000..33fbc5bd41
--- /dev/null
+++ b/app/test/test_dmadev_api.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited
+ */
+
+int test_dma_api(uint16_t dev_id);
-- 
2.33.0
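
For context, a minimal, hypothetical harness for driving this suite over
every probed dmadev could look like this (run_dma_api_tests and the
64-device bound are invented for illustration and are not part of the
patch; rte_dma_is_valid is the dmadev validity check exercised by the
suite above):

	#include <rte_dmadev.h>
	#include "test_dmadev_api.h"

	/* Run the API suite on every probed dmadev; stopping on the first
	 * failure is a choice of this sketch, not of the patch. */
	static int
	run_dma_api_tests(void)
	{
		int16_t dev_id;

		for (dev_id = 0; dev_id < 64; dev_id++) {
			if (!rte_dma_is_valid(dev_id))
				continue;
			if (test_dma_api(dev_id) != 0)
				return -1;
		}
		return 0;
	}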



* Re: [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library
  2021-10-13  7:41         ` Thomas Monjalon
@ 2021-10-15  8:29           ` Thomas Monjalon
  2021-10-15  9:59             ` fengchengwen
  0 siblings, 1 reply; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-15  8:29 UTC (permalink / raw)
  To: fengchengwen
  Cc: dev, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

13/10/2021 09:41, Thomas Monjalon:
> 13/10/2021 02:21, fengchengwen:
> > On 2021/10/13 3:09, Thomas Monjalon wrote:
> > > 11/10/2021 09:33, Chengwen Feng:
> > >> +static void
> > >> +dma_release(struct rte_dma_dev *dev)
> > >> +{
> > >> +	rte_free(dev->dev_private);
> > >> +	memset(dev, 0, sizeof(struct rte_dma_dev));
> > >> +}
> [...]
> > >> +int
> > >> +rte_dma_pmd_release(const char *name)
> > >> +{
> > >> +	struct rte_dma_dev *dev;
> > >> +
> > >> +	if (dma_check_name(name) != 0)
> > >> +		return -EINVAL;
> > >> +
> > >> +	dev = dma_find_by_name(name);
> > >> +	if (dev == NULL)
> > >> +		return -EINVAL;
> > >> +
> > >> +	dma_release(dev);
> > >> +	return 0;
> > >> +}
> > > 
> > > Trying to understand the logic of creation/destroy.
> > > skeldma_probe
> > > \-> skeldma_create
> > >     \-> rte_dma_pmd_allocate
> > >         \-> dma_allocate
> > >             \-> dma_data_prepare
> > >                 \-> dma_dev_data_prepare
> > > skeldma_remove
> > > \-> skeldma_destroy
> > >     \-> rte_dma_pmd_release
> > >         \-> dma_release
> > 
> > This patch only provides the device allocate function; the second patch provides the extra logic:
> > 
> > 	diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> > 	index 42a4693bd9..a6a5680d2b 100644
> > 	--- a/lib/dmadev/rte_dmadev.c
> > 	+++ b/lib/dmadev/rte_dmadev.c
> > 	@@ -201,6 +201,9 @@ rte_dma_pmd_release(const char *name)
> > 	        if (dev == NULL)
> >         	        return -EINVAL;
> > 
> > 	+       if (dev->state == RTE_DMA_DEV_READY)
> > 	+               return rte_dma_close(dev->dev_id);
> > 	+
> >         	dma_release(dev);
> > 	        return 0;
> > 	 }
> > 
> > So the skeldma remove will be:
> > 
> >  skeldma_remove
> >  \-> skeldma_destroy
> >      \-> rte_dma_pmd_release
> >          \-> rte_dma_close
> >              \-> dma_release
> 
> OK, in this case, no need to call dma_release from rte_dma_pmd_release,
> because it is already called from rte_dma_close.

Ping for reply please.





* Re: [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library
  2021-10-15  8:29           ` Thomas Monjalon
@ 2021-10-15  9:59             ` fengchengwen
  2021-10-15 13:46               ` Thomas Monjalon
  0 siblings, 1 reply; 339+ messages in thread
From: fengchengwen @ 2021-10-15  9:59 UTC (permalink / raw)
  To: Thomas Monjalon
  Cc: dev, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

On 2021/10/15 16:29, Thomas Monjalon wrote:
> 13/10/2021 09:41, Thomas Monjalon:
>> 13/10/2021 02:21, fengchengwen:
>>> On 2021/10/13 3:09, Thomas Monjalon wrote:
>>>> 11/10/2021 09:33, Chengwen Feng:
>>>>> +static void
>>>>> +dma_release(struct rte_dma_dev *dev)
>>>>> +{
>>>>> +	rte_free(dev->dev_private);
>>>>> +	memset(dev, 0, sizeof(struct rte_dma_dev));
>>>>> +}
>> [...]
>>>>> +int
>>>>> +rte_dma_pmd_release(const char *name)
>>>>> +{
>>>>> +	struct rte_dma_dev *dev;
>>>>> +
>>>>> +	if (dma_check_name(name) != 0)
>>>>> +		return -EINVAL;
>>>>> +
>>>>> +	dev = dma_find_by_name(name);
>>>>> +	if (dev == NULL)
>>>>> +		return -EINVAL;
>>>>> +
>>>>> +	dma_release(dev);
>>>>> +	return 0;
>>>>> +}
>>>>
>>>> Trying to understand the logic of creation/destroy.
>>>> skeldma_probe
>>>> \-> skeldma_create
>>>>     \-> rte_dma_pmd_allocate
>>>>         \-> dma_allocate
>>>>             \-> dma_data_prepare
>>>>                 \-> dma_dev_data_prepare
>>>> skeldma_remove
>>>> \-> skeldma_destroy
>>>>     \-> rte_dma_pmd_release
>>>>         \-> dma_release
>>>
>>> This patch only provides the device allocate function; the second patch provides the extra logic:
>>>
>>> 	diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
>>> 	index 42a4693bd9..a6a5680d2b 100644
>>> 	--- a/lib/dmadev/rte_dmadev.c
>>> 	+++ b/lib/dmadev/rte_dmadev.c
>>> 	@@ -201,6 +201,9 @@ rte_dma_pmd_release(const char *name)
>>> 	        if (dev == NULL)
>>>         	        return -EINVAL;
>>>
>>> 	+       if (dev->state == RTE_DMA_DEV_READY)
>>> 	+               return rte_dma_close(dev->dev_id);
>>> 	+
>>>         	dma_release(dev);
>>> 	        return 0;
>>> 	 }
>>>
>>> So the skeldma remove will be:
>>>
>>>  skeldma_remove
>>>  \-> skeldma_destroy
>>>      \-> rte_dma_pmd_release
>>>          \-> rte_dma_close
>>>              \-> dma_release
>>
>> OK, in this case, no need to call dma_release from rte_dma_pmd_release,
>> because it is already called from rte_dma_close.
> 
> Ping for reply please.

Sorry, I thought the previous reply was enough. Let me explain:

The PMD uses the following logic to create the dmadev:
  skeldma_probe
    \-> skeldma_create
      \-> rte_dma_pmd_allocate
        \-> dma_allocate
      \-> mark dmadev state to READY.

The PMD remove path will be:
 skeldma_remove
  \-> skeldma_destroy
      \-> rte_dma_pmd_release
          \-> rte_dma_close
              \-> dma_release

The application closes the dmadev:
  rte_dma_close
   \-> dma_release

In the above case, the PMD remove and the application close both call rte_dma_close,
which I think is what you expect.


skeldma is simple, so let me give you a more complicated example:
  hisi_dma_probe
    \-> hisi_dma_create
      \-> rte_dma_pmd_allocate
        \-> dma_allocate
      \-> hisi_hw_init
        \-> if init fail, call rte_dma_pmd_release.
            \-> dma_release
        \-> if init OK, mark dmadev state to READY.

As you can see, if hisi_hw_init fails, the driver calls rte_dma_pmd_release to release
the dmadev, which directly calls dma_release.
If hisi_hw_init succeeds, the hardware is OK too, so the dmadev state is marked
READY. If the PMD later removes the dmadev, it will call rte_dma_close because the dmadev's
state is READY, and the application could also call rte_dma_close to destroy the dmadev.


The rte_dma_pmd_release function serves two purposes:
1. If the dmadev's hardware init fails, the PMD can use this function to release the
dmadev.
2. If the dmadev's hardware init succeeds, the PMD can use this function to destroy
the dmadev.


If we didn't have the rte_dma_pmd_release function, we would have to export the
dma_release function to be invoked when the hardware init fails.

And if we keep rte_dma_pmd_release, it corresponds to rte_dma_pmd_allocate, and the PMD
can just invoke rte_dma_pmd_release to handle both cases above (hardware init failure at
probe time, and the remove phase).
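
To make the two paths concrete, here is a minimal sketch (the dummy_*
names are invented for illustration; only the rte_dma_pmd_allocate /
rte_dma_pmd_release calls and the READY-state gating follow the flow
described above):

	#include <errno.h>
	#include <rte_dmadev_pmd.h>

	struct dummy_dma_private { int placeholder; };

	/* Stand-in for the driver's hardware init (e.g. hisi_hw_init);
	 * here it simply pretends to succeed. */
	static int
	dummy_hw_init(struct rte_dma_dev *dev) { (void)dev; return 0; }

	static int
	dummy_dma_probe(const char *name, int numa_node)
	{
		struct rte_dma_dev *dev;

		dev = rte_dma_pmd_allocate(name, numa_node,
					   sizeof(struct dummy_dma_private));
		if (dev == NULL)
			return -ENOMEM;

		if (dummy_hw_init(dev) != 0) {
			/* Not READY yet: release goes straight to
			 * dma_release() and frees dev_private. */
			(void)rte_dma_pmd_release(name);
			return -EIO;
		}

		dev->state = RTE_DMA_DEV_READY;
		return 0;
	}

	static int
	dummy_dma_remove(const char *name)
	{
		/* State is READY, so release routes through rte_dma_close(),
		 * which in turn calls dma_release(). */
		return rte_dma_pmd_release(name);
	}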

Thanks.




* Re: [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library
  2021-10-15  9:59             ` fengchengwen
@ 2021-10-15 13:46               ` Thomas Monjalon
  0 siblings, 0 replies; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-15 13:46 UTC (permalink / raw)
  To: fengchengwen
  Cc: dev, ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

15/10/2021 11:59, fengchengwen:
> On 2021/10/15 16:29, Thomas Monjalon wrote:
> > 13/10/2021 09:41, Thomas Monjalon:
> >> 13/10/2021 02:21, fengchengwen:
> >>> On 2021/10/13 3:09, Thomas Monjalon wrote:
> >>>> 11/10/2021 09:33, Chengwen Feng:
> >>>>> +static void
> >>>>> +dma_release(struct rte_dma_dev *dev)
> >>>>> +{
> >>>>> +	rte_free(dev->dev_private);
> >>>>> +	memset(dev, 0, sizeof(struct rte_dma_dev));
> >>>>> +}
> >> [...]
> >>>>> +int
> >>>>> +rte_dma_pmd_release(const char *name)
> >>>>> +{
> >>>>> +	struct rte_dma_dev *dev;
> >>>>> +
> >>>>> +	if (dma_check_name(name) != 0)
> >>>>> +		return -EINVAL;
> >>>>> +
> >>>>> +	dev = dma_find_by_name(name);
> >>>>> +	if (dev == NULL)
> >>>>> +		return -EINVAL;
> >>>>> +
> >>>>> +	dma_release(dev);
> >>>>> +	return 0;
> >>>>> +}
> >>>>
> >>>> Trying to understand the logic of creation/destroy.
> >>>> skeldma_probe
> >>>> \-> skeldma_create
> >>>>     \-> rte_dma_pmd_allocate
> >>>>         \-> dma_allocate
> >>>>             \-> dma_data_prepare
> >>>>                 \-> dma_dev_data_prepare
> >>>> skeldma_remove
> >>>> \-> skeldma_destroy
> >>>>     \-> rte_dma_pmd_release
> >>>>         \-> dma_release
> >>>
> >>> This patch only provides the device allocate function; the second patch provides the extra logic:
> >>>
> >>> 	diff --git a/lib/dmadev/rte_dmadev.c b/lib/dmadev/rte_dmadev.c
> >>> 	index 42a4693bd9..a6a5680d2b 100644
> >>> 	--- a/lib/dmadev/rte_dmadev.c
> >>> 	+++ b/lib/dmadev/rte_dmadev.c
> >>> 	@@ -201,6 +201,9 @@ rte_dma_pmd_release(const char *name)
> >>> 	        if (dev == NULL)
> >>>         	        return -EINVAL;
> >>>
> >>> 	+       if (dev->state == RTE_DMA_DEV_READY)
> >>> 	+               return rte_dma_close(dev->dev_id);
> >>> 	+
> >>>         	dma_release(dev);
> >>> 	        return 0;
> >>> 	 }
> >>>
> >>> So the skeldma remove will be:
> >>>
> >>>  skeldma_remove
> >>>  \-> skeldma_destroy
> >>>      \-> rte_dma_pmd_release
> >>>          \-> rte_dma_close
> >>>              \-> dma_release
> >>
> >> OK, in this case, no need to call dma_release from rte_dma_pmd_release,
> >> because it is already called from rte_dma_close.
> > 
> > Ping for reply please.
> 
> Sorry, I thought the previous reply was enough. Let me explain:

No, if the previous answer had been enough, I would not have added a new comment.
Please read again:
"
no need to call dma_release from rte_dma_pmd_release,
because it is already called from rte_dma_close
"

> The PMD uses the following logic to create the dmadev:
>   skeldma_probe
>     \-> skeldma_create
>       \-> rte_dma_pmd_allocate
>         \-> dma_allocate
>       \-> mark dmadev state to READY.
> 
> The PMD remove path will be:
>  skeldma_remove
>   \-> skeldma_destroy
>       \-> rte_dma_pmd_release
>           \-> rte_dma_close
>               \-> dma_release
> 
> The application closes the dmadev:
>   rte_dma_close
>    \-> dma_release
> 
> In the above case, the PMD remove and the application close both call rte_dma_close,
> which I think is what you expect.
> 
> 
> skeldma is simple, so let me give you a more complicated example:
>   hisi_dma_probe
>     \-> hisi_dma_create
>       \-> rte_dma_pmd_allocate
>         \-> dma_allocate
>       \-> hisi_hw_init
>         \-> if init fail, call rte_dma_pmd_release.
>             \-> dma_release
>         \-> if init OK, mark dmadev state to READY.
> 
> As you can see, if hisi_hw_init fails, the driver calls rte_dma_pmd_release to release
> the dmadev, which directly calls dma_release.
> If hisi_hw_init succeeds, the hardware is OK too, so the dmadev state is marked
> READY. If the PMD later removes the dmadev, it will call rte_dma_close because the dmadev's
> state is READY, and the application could also call rte_dma_close to destroy the dmadev.
> 
> 
> The rte_dma_pmd_release function serves two purposes:
> 1. If the dmadev's hardware init fails, the PMD can use this function to release the
> dmadev.
> 2. If the dmadev's hardware init succeeds, the PMD can use this function to destroy
> the dmadev.
> 
> 
> If we didn't have the rte_dma_pmd_release function, we would have to export the
> dma_release function to be invoked when the hardware init fails.
> 
> And if we keep rte_dma_pmd_release, it corresponds to rte_dma_pmd_allocate, and the PMD
> can just invoke rte_dma_pmd_release to handle both cases above (hardware init failure at
> probe time, and the remove phase).

You are justifying the existence of the functions, OK,
but I am just discussing one call of the function, which is useless.

Anyway, now I am in the process of merging v26,
so I will send a fix.




* Re: [dpdk-dev] [PATCH v26 0/6] support dmadev
  2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
                     ` (5 preceding siblings ...)
  2021-10-13 12:25   ` [dpdk-dev] [PATCH v26 6/6] app/test: add dmadev API test Chengwen Feng
@ 2021-10-17 19:17   ` Thomas Monjalon
  6 siblings, 0 replies; 339+ messages in thread
From: Thomas Monjalon @ 2021-10-17 19:17 UTC (permalink / raw)
  To: Chengwen Feng
  Cc: ferruh.yigit, bruce.richardson, jerinj, jerinjacobk,
	andrew.rybchenko, dev, mb, nipun.gupta, hemant.agrawal,
	maxime.coquelin, honnappa.nagarahalli, david.marchand, sburla,
	pkapoor, konstantin.ananyev, conor.walsh, kevin.laatz

13/10/2021 14:24, Chengwen Feng:
> This patch set contains six patch for new add dmadev.
> 
> Chengwen Feng (6):
>   dmadev: introduce DMA device library
>   dmadev: add control plane API support
>   dmadev: add data plane API support
>   dmadev: add multi-process support
>   dma/skeleton: introduce skeleton dmadev driver
>   app/test: add dmadev API test

Applied, thanks for the big work.






Thread overview: 339+ messages
2021-07-02 13:18 [dpdk-dev] [PATCH] dmadev: introduce DMA device library Chengwen Feng
2021-07-02 13:59 ` Bruce Richardson
2021-07-04  9:30 ` Jerin Jacob
2021-07-05 10:52   ` Bruce Richardson
2021-07-05 11:12     ` Morten Brørup
2021-07-05 13:44       ` Bruce Richardson
2021-07-05 15:55     ` Jerin Jacob
2021-07-05 17:16       ` Bruce Richardson
2021-07-07  8:08         ` Jerin Jacob
2021-07-07  8:35           ` Bruce Richardson
2021-07-07 10:34             ` Jerin Jacob
2021-07-07 11:01               ` Bruce Richardson
2021-07-08  3:11                 ` fengchengwen
2021-07-08 18:35                   ` Jerin Jacob
2021-07-09  9:14                     ` Bruce Richardson
2021-07-11  7:14                       ` Jerin Jacob
2021-07-12  7:01                         ` Morten Brørup
2021-07-12  7:59                           ` Jerin Jacob
2021-07-06  8:20     ` fengchengwen
2021-07-06  9:27       ` Bruce Richardson
2021-07-06  3:01   ` fengchengwen
2021-07-06 10:01     ` Bruce Richardson
2021-07-04 14:57 ` Andrew Rybchenko
2021-07-06  3:56   ` fengchengwen
2021-07-06 10:02     ` Bruce Richardson
2021-07-04 15:21 ` Matan Azrad
2021-07-06  6:25   ` fengchengwen
2021-07-06  6:50     ` Matan Azrad
2021-07-06  9:08       ` fengchengwen
2021-07-06  9:17         ` Matan Azrad
2021-07-06 20:28 ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates Bruce Richardson
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 1/9] dmadev: add missing exports Bruce Richardson
2021-07-07  8:26     ` David Marchand
2021-07-07  8:36       ` Bruce Richardson
2021-07-07  8:57         ` David Marchand
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 2/9] dmadev: change virtual addresses to IOVA Bruce Richardson
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 3/9] dmadev: add dump function Bruce Richardson
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 4/9] dmadev: remove xstats functions Bruce Richardson
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 5/9] dmadev: drop cookie typedef Bruce Richardson
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 6/9] dmadev: allow NULL parameters to completed ops call Bruce Richardson
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 7/9] dmadev: stats structure updates Bruce Richardson
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 8/9] drivers: add dma driver category Bruce Richardson
2021-07-06 20:28   ` [dpdk-dev] [RFC UPDATE PATCH 9/9] app/test: add basic dmadev unit test Bruce Richardson
2021-07-07  3:16   ` [dpdk-dev] [RFC UPDATE PATCH 0/9] dmadev rfc suggested updates fengchengwen
2021-07-07  8:11     ` Bruce Richardson
2021-07-07  8:14     ` Bruce Richardson
2021-07-07 10:42     ` Jerin Jacob
2021-07-11  9:25 ` [dpdk-dev] [PATCH v2] dmadev: introduce DMA device library Chengwen Feng
2021-07-11  9:42   ` fengchengwen
2021-07-11 13:34     ` Jerin Jacob
2021-07-12  7:40       ` Morten Brørup
2021-07-11 14:25   ` Jerin Jacob
2021-07-12  7:15   ` Morten Brørup
2021-07-12  9:59   ` Jerin Jacob
2021-07-12 13:32     ` Bruce Richardson
2021-07-12 16:34       ` Jerin Jacob
2021-07-12 17:00         ` Bruce Richardson
2021-07-13  8:59           ` Jerin Jacob
2021-07-12 12:05   ` Bruce Richardson
2021-07-12 15:50   ` Bruce Richardson
2021-07-13  9:07     ` Jerin Jacob
2021-07-13 14:19   ` Ananyev, Konstantin
2021-07-13 14:28     ` Bruce Richardson
2021-07-13 12:27 ` [dpdk-dev] [PATCH v3] " Chengwen Feng
2021-07-13 13:06   ` fengchengwen
2021-07-13 13:37     ` Bruce Richardson
2021-07-15  6:44       ` Jerin Jacob
2021-07-15  8:25         ` Bruce Richardson
2021-07-15  9:49           ` Jerin Jacob
2021-07-15 10:00             ` Bruce Richardson
2021-07-13 16:02   ` Bruce Richardson
2021-07-14 12:22   ` Nipun Gupta
2021-07-15  8:29     ` fengchengwen
2021-07-15 11:16       ` Nipun Gupta
2021-07-15 12:11         ` Bruce Richardson
2021-07-15 12:31           ` Jerin Jacob
2021-07-15 12:34             ` Nipun Gupta
2021-07-14 16:05   ` Bruce Richardson
2021-07-15  7:10   ` Jerin Jacob
2021-07-15  9:03     ` Bruce Richardson
2021-07-15  9:30       ` Jerin Jacob
2021-07-15 10:03         ` Bruce Richardson
2021-07-15 10:05           ` Bruce Richardson
2021-07-15 15:41 ` [dpdk-dev] [PATCH v4] " Chengwen Feng
2021-07-15 16:04   ` fengchengwen
2021-07-15 16:33     ` Bruce Richardson
2021-07-16  3:04       ` fengchengwen
2021-07-16  9:50         ` Bruce Richardson
2021-07-16 12:34           ` Jerin Jacob
2021-07-16 12:40         ` Jerin Jacob
2021-07-16 12:48           ` Bruce Richardson
2021-07-16 12:54     ` Jerin Jacob
2021-07-16  2:45 ` [dpdk-dev] [PATCH v5] " Chengwen Feng
2021-07-16 13:20   ` Jerin Jacob
2021-07-16 14:41   ` Bruce Richardson
2021-07-19  3:29 ` [dpdk-dev] [PATCH v6] " Chengwen Feng
2021-07-19  6:21   ` Jerin Jacob
2021-07-19 13:20     ` fengchengwen
2021-07-19 13:36       ` Jerin Jacob
2021-07-19 13:05 ` [dpdk-dev] [PATCH v7] " Chengwen Feng
2021-07-20  1:14 ` [dpdk-dev] [PATCH v8] " Chengwen Feng
2021-07-20  5:03   ` Jerin Jacob
2021-07-20  6:53     ` fengchengwen
2021-07-20  9:43       ` Jerin Jacob
2021-07-20 10:13       ` Bruce Richardson
2021-07-20 11:12 ` [dpdk-dev] [PATCH v9] " Chengwen Feng
2021-07-20 12:05   ` Bruce Richardson
2021-07-20 12:46 ` [dpdk-dev] [PATCH v10] " Chengwen Feng
2021-07-26  6:53   ` fengchengwen
2021-07-26  8:31     ` Bruce Richardson
2021-07-27  3:57       ` fengchengwen
2021-07-26 11:03     ` Morten Brørup
2021-07-26 11:21       ` Jerin Jacob
2021-07-27  3:39 ` [dpdk-dev] [PATCH v11 0/2] support dmadev Chengwen Feng
2021-07-27  3:39   ` [dpdk-dev] [PATCH v11 1/2] dmadev: introduce DMA device library Chengwen Feng
2021-07-28 11:13     ` Bruce Richardson
2021-07-29  1:26       ` fengchengwen
2021-07-29  9:15         ` Bruce Richardson
2021-07-29 13:33           ` fengchengwen
2021-07-29 10:44         ` Jerin Jacob
2021-07-29 13:30           ` fengchengwen
2021-07-27  3:40   ` [dpdk-dev] [PATCH v11 2/2] doc: add dmadev library guide Chengwen Feng
2021-07-29 11:02     ` Jerin Jacob
2021-07-29 13:13       ` fengchengwen
2021-07-29 13:28         ` fengchengwen
2021-07-29 13:06 ` [dpdk-dev] [PATCH v12 0/6] support dmadev Chengwen Feng
2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 5/6] doc: add DMA device library guide Chengwen Feng
2021-07-29 13:06   ` [dpdk-dev] [PATCH v12 6/6] maintainers: add for dmadev Chengwen Feng
2021-08-03 11:29 ` [dpdk-dev] [PATCH v13 0/6] support dmadev Chengwen Feng
2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
2021-08-05 12:56     ` Walsh, Conor
2021-08-05 13:12       ` fengchengwen
2021-08-05 13:44         ` Conor Walsh
2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 5/6] doc: add DMA device library guide Chengwen Feng
2021-08-03 14:55     ` Jerin Jacob
2021-08-05 13:15       ` fengchengwen
2021-08-03 11:29   ` [dpdk-dev] [PATCH v13 6/6] maintainers: add for dmadev Chengwen Feng
2021-08-03 11:46   ` [dpdk-dev] [PATCH v13 0/6] support dmadev fengchengwen
2021-08-10 11:54 ` [dpdk-dev] [PATCH v14 " Chengwen Feng
2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 5/6] doc: add DMA device library guide Chengwen Feng
2021-08-10 15:27     ` Walsh, Conor
2021-08-11  0:47       ` fengchengwen
2021-08-13  9:20       ` fengchengwen
2021-08-13 10:12         ` Walsh, Conor
2021-08-10 11:54   ` [dpdk-dev] [PATCH v14 6/6] maintainers: add for dmadev Chengwen Feng
2021-08-13  9:09 ` [dpdk-dev] [PATCH v15 0/6] support dmadev Chengwen Feng
2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 1/6] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-08-19 14:52     ` Bruce Richardson
2021-08-23  3:43       ` fengchengwen
2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 2/6] dmadev: introduce DMA device library internal header Chengwen Feng
2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 3/6] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 4/6] dmadev: introduce DMA device library implementation Chengwen Feng
2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 5/6] doc: add DMA device library guide Chengwen Feng
2021-08-13  9:09   ` [dpdk-dev] [PATCH v15 6/6] maintainers: add for dmadev Chengwen Feng
2021-08-23  3:31 ` [dpdk-dev] [PATCH v16 0/9] support dmadev Chengwen Feng
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 1/9] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 2/9] dmadev: introduce DMA device library internal header Chengwen Feng
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 3/9] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 4/9] dmadev: introduce DMA device library implementation Chengwen Feng
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 5/9] doc: add DMA device library guide Chengwen Feng
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 6/9] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-08-26 18:39     ` Bruce Richardson
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 7/9] dma/skeleton: add test cases Chengwen Feng
2021-08-23 14:03     ` Bruce Richardson
2021-08-26  9:30       ` fengchengwen
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 8/9] test: enable dmadev skeleton test Chengwen Feng
2021-08-23  3:31   ` [dpdk-dev] [PATCH v16 9/9] maintainers: add for dmadev Chengwen Feng
2021-08-28  7:29 ` [dpdk-dev] [PATCH v17 0/8] support dmadev Chengwen Feng
2021-08-28  7:29   ` [dpdk-dev] [PATCH v17 1/8] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 2/8] dmadev: introduce DMA device library internal header Chengwen Feng
2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 3/8] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 4/8] dmadev: introduce DMA device library implementation Chengwen Feng
2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 5/8] doc: add DMA device library guide Chengwen Feng
2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 6/8] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 7/8] app/test: add dmadev API test Chengwen Feng
2021-08-28  7:30   ` [dpdk-dev] [PATCH v17 8/8] maintainers: add for dmadev Chengwen Feng
2021-08-28  8:25     ` fengchengwen
2021-08-30  8:19       ` Bruce Richardson
2021-09-02 10:54 ` [dpdk-dev] [PATCH v18 0/8] support dmadev Chengwen Feng
2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 1/8] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 2/8] dmadev: introduce DMA device library internal header Chengwen Feng
2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 3/8] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 4/8] dmadev: introduce DMA device library implementation Chengwen Feng
2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 5/8] doc: add DMA device library guide Chengwen Feng
2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 6/8] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 7/8] app/test: add dmadev API test Chengwen Feng
2021-09-02 10:54   ` [dpdk-dev] [PATCH v18 8/8] maintainers: add for dmadev Chengwen Feng
2021-09-02 11:51     ` Bruce Richardson
2021-09-02 13:39       ` fengchengwen
2021-09-03 12:59         ` Maxime Coquelin
2021-09-04  7:02           ` fengchengwen
2021-09-06  1:46             ` Li, Xiaoyun
2021-09-06  8:00               ` fengchengwen
2021-09-06  2:03           ` Xia, Chenbo
2021-09-06  8:01             ` fengchengwen
2021-09-02 13:13 ` [dpdk-dev] [PATCH v19 0/7] support dmadev Chengwen Feng
2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-09-03 11:42     ` Gagandeep Singh
2021-09-04  1:31       ` fengchengwen
2021-09-06  6:48         ` Gagandeep Singh
2021-09-06  7:52           ` fengchengwen
2021-09-06  8:06             ` Jerin Jacob
2021-09-06  8:08             ` Bruce Richardson
2021-09-07 12:55             ` fengchengwen
2021-09-03 13:03     ` Bruce Richardson
2021-09-04  3:05       ` fengchengwen
2021-09-04 10:10       ` Morten Brørup
2021-09-03 15:13     ` Kevin Laatz
2021-09-03 15:35     ` Conor Walsh
2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
2021-09-03 15:13     ` Kevin Laatz
2021-09-03 15:35     ` Conor Walsh
2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-09-03 15:13     ` Kevin Laatz
2021-09-03 15:35     ` Conor Walsh
2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
2021-09-03 15:13     ` Kevin Laatz
2021-09-03 15:30       ` Bruce Richardson
2021-09-03 15:35     ` Conor Walsh
2021-09-04  8:52       ` fengchengwen
2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 5/7] doc: add DMA device library guide Chengwen Feng
2021-09-03 15:13     ` Kevin Laatz
2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-09-03 15:14     ` Kevin Laatz
2021-09-04  7:17       ` fengchengwen
2021-09-03 15:36     ` Conor Walsh
2021-09-02 13:13   ` [dpdk-dev] [PATCH v19 7/7] app/test: add dmadev API test Chengwen Feng
2021-09-02 14:11     ` Walsh, Conor
2021-09-03  0:39       ` fengchengwen
2021-09-03 15:38         ` Walsh, Conor
2021-09-04  7:22           ` fengchengwen
2021-09-03 15:14     ` Kevin Laatz
2021-09-04 10:10 ` [dpdk-dev] [PATCH v20 0/7] support dmadev Chengwen Feng
2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
2021-09-06 13:35     ` Bruce Richardson
2021-09-07 13:05       ` fengchengwen
2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 5/7] doc: add DMA device library guide Chengwen Feng
2021-09-04 10:17     ` Jerin Jacob
2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-09-04 10:10   ` [dpdk-dev] [PATCH v20 7/7] app/test: add dmadev API test Chengwen Feng
2021-09-06 13:37   ` [dpdk-dev] [PATCH v20 0/7] support dmadev Bruce Richardson
2021-09-07 12:56 ` [dpdk-dev] [PATCH v21 " Chengwen Feng
2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 1/7] dmadev: introduce DMA device library public APIs Chengwen Feng
2021-09-09 10:33     ` Thomas Monjalon
2021-09-09 11:18       ` Bruce Richardson
2021-09-09 11:29         ` Thomas Monjalon
2021-09-09 12:45           ` Bruce Richardson
2021-09-09 13:54             ` fengchengwen
2021-09-09 14:26               ` Thomas Monjalon
2021-09-09 14:31                 ` Bruce Richardson
2021-09-09 14:28               ` Bruce Richardson
2021-09-09 15:12                 ` Morten Brørup
2021-09-09 13:33       ` fengchengwen
2021-09-09 14:19         ` Thomas Monjalon
2021-09-16  3:57       ` fengchengwen
2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 2/7] dmadev: introduce DMA device library internal header Chengwen Feng
2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 3/7] dmadev: introduce DMA device library PMD header Chengwen Feng
2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation Chengwen Feng
2021-09-08  9:54     ` Walsh, Conor
2021-09-09 13:25       ` fengchengwen
2021-09-15 13:51     ` Kevin Laatz
2021-09-15 14:34       ` Bruce Richardson
2021-09-15 14:47         ` Kevin Laatz
2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 5/7] doc: add DMA device library guide Chengwen Feng
2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 6/7] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-09-07 12:56   ` [dpdk-dev] [PATCH v21 7/7] app/test: add dmadev API test Chengwen Feng
2021-09-16  3:41 ` [dpdk-dev] [PATCH v22 0/5] support dmadev Chengwen Feng
2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 1/5] dmadev: introduce DMA device library Chengwen Feng
2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 2/5] dmadev: add control plane function support Chengwen Feng
2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 3/5] dmadev: add data " Chengwen Feng
2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 4/5] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-09-16  3:41   ` [dpdk-dev] [PATCH v22 5/5] app/test: add dmadev API test Chengwen Feng
2021-09-24 10:53 ` [dpdk-dev] [PATCH v23 0/6] support dmadev Chengwen Feng
2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 1/6] dmadev: introduce DMA device library Chengwen Feng
2021-10-04 21:12     ` Radha Mohan
2021-10-05  8:24       ` Kevin Laatz
2021-10-05 16:39         ` Radha Mohan
2021-10-08  1:52       ` fengchengwen
2021-10-06 10:26     ` Thomas Monjalon
2021-10-08  7:13       ` fengchengwen
2021-10-08 10:09         ` Thomas Monjalon
2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 2/6] dmadev: add control plane function support Chengwen Feng
2021-10-05 10:16     ` Matan Azrad
2021-10-08  3:28       ` fengchengwen
2021-10-06 10:46     ` Thomas Monjalon
2021-10-08  7:55       ` fengchengwen
2021-10-08 10:18         ` Thomas Monjalon
2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 3/6] dmadev: add data " Chengwen Feng
2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 4/6] dmadev: add multi-process support Chengwen Feng
2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-09-24 10:53   ` [dpdk-dev] [PATCH v23 6/6] app/test: add dmadev API test Chengwen Feng
2021-10-09  9:33 ` [dpdk-dev] [PATCH v24 0/6] support dmadev Chengwen Feng
2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 1/6] dmadev: introduce DMA device library Chengwen Feng
2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 2/6] dmadev: add control plane API support Chengwen Feng
2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 3/6] dmadev: add data " Chengwen Feng
2021-10-09 10:03     ` fengchengwen
2021-10-11 10:40     ` Bruce Richardson
2021-10-11 12:31       ` fengchengwen
2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 4/6] dmadev: add multi-process support Chengwen Feng
2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-10-09  9:33   ` [dpdk-dev] [PATCH v24 6/6] app/test: add dmadev API test Chengwen Feng
2021-10-11  7:33 ` [dpdk-dev] [PATCH v25 0/6] support dmadev Chengwen Feng
2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 1/6] dmadev: introduce DMA device library Chengwen Feng
2021-10-12 19:09     ` Thomas Monjalon
2021-10-13  0:21       ` fengchengwen
2021-10-13  7:41         ` Thomas Monjalon
2021-10-15  8:29           ` Thomas Monjalon
2021-10-15  9:59             ` fengchengwen
2021-10-15 13:46               ` Thomas Monjalon
2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 2/6] dmadev: add control plane API support Chengwen Feng
2021-10-11 15:44     ` Bruce Richardson
2021-10-12  3:57       ` fengchengwen
2021-10-12 18:57     ` Thomas Monjalon
2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 3/6] dmadev: add data " Chengwen Feng
2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 4/6] dmadev: add multi-process support Chengwen Feng
2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-10-11  7:33   ` [dpdk-dev] [PATCH v25 6/6] app/test: add dmadev API test Chengwen Feng
2021-10-13 12:24 ` [dpdk-dev] [PATCH v26 0/6] support dmadev Chengwen Feng
2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 1/6] dmadev: introduce DMA device library Chengwen Feng
2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 2/6] dmadev: add control plane API support Chengwen Feng
2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 3/6] dmadev: add data " Chengwen Feng
2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 4/6] dmadev: add multi-process support Chengwen Feng
2021-10-13 12:24   ` [dpdk-dev] [PATCH v26 5/6] dma/skeleton: introduce skeleton dmadev driver Chengwen Feng
2021-10-13 12:25   ` [dpdk-dev] [PATCH v26 6/6] app/test: add dmadev API test Chengwen Feng
2021-10-17 19:17   ` [dpdk-dev] [PATCH v26 0/6] support dmadev Thomas Monjalon
