Introduce rawdev driver support for ZXDH, which
can be used to connect two separate hosts with each other.

Signed-off-by: Yong Zhang <zhang.yong25@zte.com.cn>
---
 MAINTAINERS                    |   5 +
 doc/guides/rawdevs/index.rst   |   1 +
 doc/guides/rawdevs/zxdh.rst    |  35 +++++
 drivers/raw/meson.build        |   1 +
 drivers/raw/zxdh/meson.build   |   5 +
 drivers/raw/zxdh/zxdh_rawdev.c | 222 +++++++++++++++++++++++++++++++++
 drivers/raw/zxdh/zxdh_rawdev.h | 121 ++++++++++++++++++
 7 files changed, 390 insertions(+)
 create mode 100644 doc/guides/rawdevs/zxdh.rst
 create mode 100644 drivers/raw/zxdh/meson.build
 create mode 100644 drivers/raw/zxdh/zxdh_rawdev.c
 create mode 100644 drivers/raw/zxdh/zxdh_rawdev.h

diff --git a/MAINTAINERS b/MAINTAINERS
index c5a703b5c0..6dd4fbae6e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1511,6 +1511,11 @@ M: Gagandeep Singh <g.singh@nxp.com>
 F: drivers/raw/dpaa2_cmdif/
 F: doc/guides/rawdevs/dpaa2_cmdif.rst
 
+ZXDH
+M: Yong Zhang <zhang.yong25@zte.com.cn>
+F: drivers/raw/zxdh/
+F: doc/guides/rawdevs/zxdh.rst
+
 
 Packet processing
 -----------------
diff --git a/doc/guides/rawdevs/index.rst b/doc/guides/rawdevs/index.rst
index f34315f051..d85a4b7148 100644
--- a/doc/guides/rawdevs/index.rst
+++ b/doc/guides/rawdevs/index.rst
@@ -16,3 +16,4 @@ application through rawdev API.
     dpaa2_cmdif
     ifpga
     ntb
+    zxdh
diff --git a/doc/guides/rawdevs/zxdh.rst b/doc/guides/rawdevs/zxdh.rst
new file mode 100644
index 0000000000..fa7ada1004
--- /dev/null
+++ b/doc/guides/rawdevs/zxdh.rst
@@ -0,0 +1,35 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+    Copyright 2024 ZTE Corporation
+
+ZXDH Rawdev Driver
+======================
+
+The ``zxdh`` rawdev driver is an implementation of the rawdev API
+that provides communication between two separate hosts.
+This is achieved using the GDMA controller of the Dinghai SoC,
+which can be configured through the exposed MPF devices.
+
+Device Setup
+-------------
+
+Binding the MPF devices to the ZXDH MPF kernel driver is recommended, but not mandatory.
+The kernel drivers can be downloaded at `ZTE Official Website
+<https://enterprise.zte.com.cn/>`_.
+
+Initialization
+--------------
+
+The ``zxdh`` rawdev driver must run in IOVA PA mode.
+Pass ``--iova-mode=pa`` in the EAL options.
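+
+A minimal invocation could look as follows,
+where ``dpdk-app`` is only a placeholder for the actual application::
+
+   ./dpdk-app --iova-mode=pa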
+
+Platform Requirement
+~~~~~~~~~~~~~~~~~~~~
+
+This PMD is only supported on ZTE Neo Platforms:
+
+- Neo X510/X512
diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build
index 05cad143fe..237d1bdd80 100644
--- a/drivers/raw/meson.build
+++ b/drivers/raw/meson.build
@@ -12,5 +12,6 @@ drivers = [
         'ifpga',
         'ntb',
         'skeleton',
+        'zxdh',
 ]
 std_deps = ['rawdev']
diff --git a/drivers/raw/zxdh/meson.build b/drivers/raw/zxdh/meson.build
new file mode 100644
index 0000000000..266d3db6d8
--- /dev/null
+++ b/drivers/raw/zxdh/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2024 ZTE Corporation
+
+deps += ['rawdev', 'kvargs', 'mbuf', 'bus_pci']
+sources = files('zxdh_rawdev.c')
diff --git a/drivers/raw/zxdh/zxdh_rawdev.c b/drivers/raw/zxdh/zxdh_rawdev.c
new file mode 100644
index 0000000000..269c4f92e0
--- /dev/null
+++ b/drivers/raw/zxdh/zxdh_rawdev.c
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2024 ZTE Corporation
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_memzone.h>
+#include <rte_atomic.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_pci.h>
+#include <bus_pci_driver.h>
+#include <rte_eal_paging.h>
+
+#include "zxdh_rawdev.h"
+
+/* Register offset */
+#define ZXDH_GDMA_BASE_OFFSET                   0x100000
+
+#define ZXDH_GDMA_CHAN_SHIFT                    0x80
+char zxdh_gdma_driver_name[] = "rawdev_zxdh_gdma";
+char dev_name[] = "zxdh_gdma";
+
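+/* Channel register windows are spaced ZXDH_GDMA_CHAN_SHIFT bytes apart from the GDMA base. */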
+uint32_t
+zxdh_gdma_read_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset)
+{
+    struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+    uint32_t addr = 0;
+    uint32_t val = 0;
+
+    addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
+    val = *(uint32_t *)(gdmadev->base_addr + addr);
+
+    return val;
+}
+
+void
+zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint32_t offset, uint32_t val)
+{
+    struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+    uint32_t addr = 0;
+
+    addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;
+    *(uint32_t *)(gdmadev->base_addr + addr) = val;
+}
+
+static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {
+};
+
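+/* Map BAR0 of the device via sysfs so that the GDMA registers can be accessed. */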
+static int
+zxdh_gdma_map_resource(struct rte_pci_device *dev)
+{
+    int fd = -1;
+    char devname[PATH_MAX];
+    void *mapaddr = NULL;
+    struct rte_pci_addr *loc;
+
+    loc = &dev->addr;
+    snprintf(devname, sizeof(devname), "%s/" PCI_PRI_FMT "/resource0",
+        rte_pci_get_sysfs_path(),
+        loc->domain, loc->bus, loc->devid,
+        loc->function);
+
+    fd = open(devname, O_RDWR);
+    if (fd < 0) {
+        ZXDH_PMD_LOG(ERR, "Cannot open %s: %s", devname, strerror(errno));
+        return -1;
+    }
+
+    /* Map the PCI memory resource of device */
+    mapaddr = rte_mem_map(NULL, (size_t)dev->mem_resource[0].len,
+                RTE_PROT_READ | RTE_PROT_WRITE,
+                RTE_MAP_SHARED, fd, 0);
+    if (mapaddr == NULL) {
+        ZXDH_PMD_LOG(ERR, "cannot map resource(%d, 0x%zx): %s (%p)",
+                fd, (size_t)dev->mem_resource[0].len,
+                rte_strerror(rte_errno), mapaddr);
+        close(fd);
+        return -1;
+    }
+
+    close(fd);
+    dev->mem_resource[0].addr = mapaddr;
+
+    return 0;
+}
+
+static void
+zxdh_gdma_unmap_resource(void *requested_addr, size_t size)
+{
+    if (requested_addr == NULL)
+        return;
+
+    /* Unmap the PCI memory resource of device */
+    if (rte_mem_unmap(requested_addr, size))
+        ZXDH_PMD_LOG(ERR, "cannot mem unmap(%p, %#zx): %s",
+            requested_addr, size, rte_strerror(rte_errno));
+    else
+        ZXDH_PMD_LOG(DEBUG, "PCI memory unmapped at %p", requested_addr);
+}
+
+static int
+zxdh_gdma_rawdev_probe(struct rte_pci_driver *pci_drv __rte_unused,
+                        struct rte_pci_device *pci_dev)
+{
+    struct rte_rawdev *dev = NULL;
+    struct zxdh_gdma_rawdev *gdmadev = NULL;
+    struct zxdh_gdma_queue *queue = NULL;
+    uint8_t i = 0;
+    int ret;
+
+    if (pci_dev->mem_resource[0].phys_addr == 0) {
+        ZXDH_PMD_LOG(ERR, "PCI bar0 resource is invalid");
+        return -1;
+    }
+
+    ret = zxdh_gdma_map_resource(pci_dev);
+    if (ret != 0) {
+        ZXDH_PMD_LOG(ERR, "Failed to mmap pci device(%s)", pci_dev->name);
+        return -1;
+    }
+    ZXDH_PMD_LOG(INFO, "%s bar0 0x%"PRIx64" mapped at %p",
+                pci_dev->name, pci_dev->mem_resource[0].phys_addr,
+                pci_dev->mem_resource[0].addr);
+
+    dev = rte_rawdev_pmd_allocate(dev_name, sizeof(struct zxdh_gdma_rawdev), rte_socket_id());
+    if (dev == NULL) {
+        ZXDH_PMD_LOG(ERR, "Unable to allocate gdma rawdev");
+        goto err_out;
+    }
+    ZXDH_PMD_LOG(INFO, "Init %s on NUMA node %d, dev_id is %d",
+                        dev_name, rte_socket_id(), dev->dev_id);
+
+    dev->dev_ops = &zxdh_gdma_rawdev_ops;
+    dev->device = &pci_dev->device;
+    dev->driver_name = zxdh_gdma_driver_name;
+    gdmadev = zxdh_gdma_rawdev_get_priv(dev);
+    gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;
+    gdmadev->rawdev = dev;
+    gdmadev->queue_num = ZXDH_GDMA_TOTAL_CHAN_NUM;
+    gdmadev->used_num = 0;
+    gdmadev->base_addr = (uintptr_t)pci_dev->mem_resource[0].addr + ZXDH_GDMA_BASE_OFFSET;
+
+    for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {
+        queue = &(gdmadev->vqs[i]);
+        queue->enable = 0;
+        queue->queue_size = ZXDH_GDMA_QUEUE_SIZE;
+        rte_spinlock_init(&(queue->enqueue_lock));
+    }
+
+    return 0;
+
+err_out:
+    zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
+        (size_t)pci_dev->mem_resource[0].len);
+    return -1;
+}
+
+static int
+zxdh_gdma_rawdev_remove(struct rte_pci_device *pci_dev)
+{
+    struct rte_rawdev *dev = NULL;
+    int ret = 0;
+
+    dev = rte_rawdev_pmd_get_named_dev(dev_name);
+    if (dev == NULL)
+        return -EINVAL;
+
+    /* rte_rawdev_close is called by pmd_release */
+    ret = rte_rawdev_pmd_release(dev);
+    if (ret != 0) {
+        ZXDH_PMD_LOG(ERR, "Device cleanup failed");
+        return -1;
+    }
+
+    zxdh_gdma_unmap_resource(pci_dev->mem_resource[0].addr,
+        (size_t)pci_dev->mem_resource[0].len);
+
+    ZXDH_PMD_LOG(DEBUG, "rawdev %s remove done!", dev_name);
+
+    return ret;
+}
+
+static const struct rte_pci_id zxdh_gdma_rawdev_map[] = {
+    { RTE_PCI_DEVICE(ZXDH_GDMA_VENDORID, ZXDH_GDMA_DEVICEID) },
+    { .vendor_id = 0, /* sentinel */ },
+};
+
+static struct rte_pci_driver zxdh_gdma_rawdev_pmd = {
+    .id_table = zxdh_gdma_rawdev_map,
+    .drv_flags = 0,
+    .probe = zxdh_gdma_rawdev_probe,
+    .remove = zxdh_gdma_rawdev_remove,
+};
+
+RTE_PMD_REGISTER_PCI(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(zxdh_gdma_rawdev_pci_driver, zxdh_gdma_rawdev_map);
+RTE_LOG_REGISTER_DEFAULT(zxdh_gdma_rawdev_logtype, NOTICE);
diff --git a/drivers/raw/zxdh/zxdh_rawdev.h b/drivers/raw/zxdh/zxdh_rawdev.h
new file mode 100644
index 0000000000..b4d977ce54
--- /dev/null
+++ b/drivers/raw/zxdh/zxdh_rawdev.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_RAWDEV_H
+#define ZXDH_RAWDEV_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_rawdev.h>
+#include <rte_spinlock.h>
+
+extern int zxdh_gdma_rawdev_logtype;
+#define RTE_LOGTYPE_ZXDH_GDMA                   zxdh_gdma_rawdev_logtype
+
+#define ZXDH_PMD_LOG(level, ...) \
+    RTE_LOG_LINE_PREFIX(level, ZXDH_GDMA, \
+        "%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
+
+#define ZXDH_GDMA_VENDORID                      0x1cf2
+#define ZXDH_GDMA_DEVICEID                      0x8044
+
+#define ZXDH_GDMA_TOTAL_CHAN_NUM                58
+#define ZXDH_GDMA_QUEUE_SIZE                    16384
+#define ZXDH_GDMA_RING_SIZE                     32768
+
+enum zxdh_gdma_device_state {
+    ZXDH_GDMA_DEV_RUNNING,
+    ZXDH_GDMA_DEV_STOPPED
+};
+
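+/* GDMA hardware buffer descriptor layout. */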
+struct zxdh_gdma_buff_desc {
+    uint32_t SrcAddr_L;
+    uint32_t DstAddr_L;
+    uint32_t Xpara;
+    uint32_t ZY_para;
+    uint32_t ZY_SrcStep;
+    uint32_t ZY_DstStep;
+    uint32_t ExtAddr;
+    uint32_t LLI_Addr_L;
+    uint32_t LLI_Addr_H;
+    uint32_t ChCont;
+    uint32_t LLI_User;
+    uint32_t ErrAddr;
+    uint32_t Control;
+    uint32_t SrcAddr_H;
+    uint32_t DstAddr_H;
+    uint32_t Reserved;
+};
+
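+/* Software context describing a single DMA job. */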
+struct zxdh_gdma_job {
+    uint64_t src;
+    uint64_t dest;
+    uint32_t len;
+    uint32_t flags;
+    uint64_t cnxt;
+    uint16_t status;
+    uint16_t vq_id;
+    void *usr_elem;
+    uint8_t ep_id;
+    uint8_t pf_id;
+    uint16_t vf_id;
+};
+
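+/* Per-queue state: hardware descriptor ring and software bookkeeping ring. */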
+struct zxdh_gdma_queue {
+    uint8_t   enable;
+    uint8_t   is_txq;
+    uint16_t  vq_id;
+    uint16_t  queue_size;
+    /* 0:GDMA needs to be configured through the APB interface */
+    uint16_t  flag;
+    uint32_t  user;
+    uint16_t  tc_cnt;
+    rte_spinlock_t enqueue_lock;
+    struct {
+        uint16_t avail_idx;
+        uint16_t last_avail_idx;
+        rte_iova_t ring_mem;
+        const struct rte_memzone *ring_mz;
+        struct zxdh_gdma_buff_desc *desc;
+    } ring;
+    struct {
+        uint16_t  free_cnt;
+        uint16_t  deq_cnt;
+        uint16_t  pend_cnt;
+        uint16_t  enq_idx;
+        uint16_t  deq_idx;
+        uint16_t  used_idx;
+        struct zxdh_gdma_job **job;
+    } sw_ring;
+};
+
+struct zxdh_gdma_rawdev {
+    struct rte_device *device;
+    struct rte_rawdev *rawdev;
+    uintptr_t base_addr;
+    uint8_t queue_num; /* total queue num */
+    uint8_t used_num;  /* used  queue num */
+    enum zxdh_gdma_device_state device_state;
+    struct zxdh_gdma_queue vqs[ZXDH_GDMA_TOTAL_CHAN_NUM];
+};
+
+static inline struct zxdh_gdma_rawdev *
+zxdh_gdma_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+    return rawdev->dev_private;
+}
+
+uint32_t zxdh_gdma_read_reg(struct rte_rawdev *dev, uint16_t qidx, uint32_t offset);
+void zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t qidx, uint32_t offset, uint32_t val);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZXDH_RAWDEV_H */
-- 
2.43.0