From 7da90caa892abc0ab27fc11e892f6616f4bc8c9a Mon Sep 17 00:00:00 2001

From: Yong Zhang <zhang.yong25@zte.com.cn>

Date: Wed, 12 Jun 2024 17:35:34 +0800

Subject: [PATCH] raw/zxdh: introduce zxdh raw device driver


Introduce rawdev driver support for the ZXDH device,

which can be used to connect two separate hosts with each other.
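
A minimal usage sketch, assuming the generic rawdev API flow

(the device, queue and job variable names below are illustrative):

  rte_vdev_init("rawdev_zxdh_gdma", NULL);

  dev_id = rte_rawdev_get_dev_id("zxdh_gdma");

  rte_rawdev_configure(dev_id, &config, sizeof(config));

  vq_id = rte_rawdev_queue_setup(dev_id, 0, &queue_config,

                                 sizeof(queue_config));

  rte_rawdev_start(dev_id);

  rte_rawdev_enqueue_buffers(dev_id, NULL, job_cnt, &e_context);

  rte_rawdev_dequeue_buffers(dev_id, NULL, job_cnt, &e_context);

where config is a struct zxdh_gdma_config, queue_config carries a

struct zxdh_gdma_rbp selecting the transfer direction, and e_context

is a struct zxdh_gdma_enqdeq holding the vq_id and the job array.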


Signed-off-by: Yong Zhang <zhang.yong25@zte.com.cn>

---

 MAINTAINERS                    |   5 +

 doc/guides/rawdevs/index.rst   |   1 +

 doc/guides/rawdevs/zxdh.rst    |  36 +

 drivers/raw/meson.build        |   1 +

 drivers/raw/zxdh/meson.build   |   5 +

 drivers/raw/zxdh/zxdh_pci.c    | 564 ++++++++++++++++++++

 drivers/raw/zxdh/zxdh_pci.h    |  39 ++

 drivers/raw/zxdh/zxdh_rawdev.c | 915 +++++++++++++++++++++++++++++++++

 drivers/raw/zxdh/zxdh_rawdev.h | 169 ++++++

 9 files changed, 1735 insertions(+)

 create mode 100644 doc/guides/rawdevs/zxdh.rst

 create mode 100644 drivers/raw/zxdh/meson.build

 create mode 100644 drivers/raw/zxdh/zxdh_pci.c

 create mode 100644 drivers/raw/zxdh/zxdh_pci.h

 create mode 100644 drivers/raw/zxdh/zxdh_rawdev.c

 create mode 100644 drivers/raw/zxdh/zxdh_rawdev.h


diff --git a/MAINTAINERS b/MAINTAINERS

index c9adff9846..4fcf137c10 100644

--- a/MAINTAINERS

+++ b/MAINTAINERS

@@ -1478,6 +1478,11 @@ M: Gagandeep Singh <g.singh@nxp.com>

 F: drivers/raw/dpaa2_cmdif/

 F: doc/guides/rawdevs/dpaa2_cmdif.rst

 

+ZXDH

+M: Yong Zhang <zhang.yong25@zte.com.cn>

+F: drivers/raw/zxdh/

+F: doc/guides/rawdevs/zxdh.rst

+

 

 Packet processing

 -----------------

diff --git a/doc/guides/rawdevs/index.rst b/doc/guides/rawdevs/index.rst

index f34315f051..d85a4b7148 100644

--- a/doc/guides/rawdevs/index.rst

+++ b/doc/guides/rawdevs/index.rst

@@ -16,3 +16,4 @@ application through rawdev API.

     dpaa2_cmdif

     ifpga

     ntb

+    zxdh

diff --git a/doc/guides/rawdevs/zxdh.rst b/doc/guides/rawdevs/zxdh.rst

new file mode 100644

index 0000000000..15ad85261f

--- /dev/null

+++ b/doc/guides/rawdevs/zxdh.rst

@@ -0,0 +1,36 @@

+..  SPDX-License-Identifier: BSD-3-Clause

+    Copyright 2024 ZTE Corporation

+

+ZXDH Rawdev Driver

+==================

+

+The ``zxdh`` rawdev driver is an implementation of the rawdev API

+that provides communication between two separate hosts.

+This is achieved by using the GDMA controller of the Dinghai SoC.

+

+

+Initialization

+--------------

+

+The ZXDH device is exposed as a vdev device which consists of MPF devices.

+On EAL initialization, the vdev device can be created from the application code by:

+

+* Invoking ``rte_vdev_init("rawdev_zxdh_gdma")`` from the application

+

+* Using ``--vdev="rawdev_zxdh_gdma"`` in the EAL options, which will call

+  ``rte_vdev_init()`` internally (see the example below)
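+

+A minimal example of creating the device from the application code

+(error handling omitted):

+

+.. code-block:: c

+

+   rte_vdev_init("rawdev_zxdh_gdma", NULL);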

+

+

+Platform Requirement

+~~~~~~~~~~~~~~~~~~~~

+

+This PMD is only supported on ZTE Neo platforms:

+

+- Neo X510/X512

diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build

index 05cad143fe..237d1bdd80 100644

--- a/drivers/raw/meson.build

+++ b/drivers/raw/meson.build

@@ -12,5 +12,6 @@ drivers = [

         'ifpga',

         'ntb',

         'skeleton',

+        'zxdh',

 ]

 std_deps = ['rawdev']

diff --git a/drivers/raw/zxdh/meson.build b/drivers/raw/zxdh/meson.build

new file mode 100644

index 0000000000..6cd9521387

--- /dev/null

+++ b/drivers/raw/zxdh/meson.build

@@ -0,0 +1,5 @@

+# SPDX-License-Identifier: BSD-3-Clause

+# Copyright 2024 ZTE Corporation

+

+deps += ['rawdev', 'kvargs', 'mbuf', 'bus_vdev']

+sources = files('zxdh_rawdev.c', 'zxdh_pci.c')

diff --git a/drivers/raw/zxdh/zxdh_pci.c b/drivers/raw/zxdh/zxdh_pci.c

new file mode 100644

index 0000000000..4debddb0d2

--- /dev/null

+++ b/drivers/raw/zxdh/zxdh_pci.c

@@ -0,0 +1,564 @@

+/* SPDX-License-Identifier: BSD-3-Clause

+ * Copyright 2024 ZTE Corporation

+ */

+

+#include <stdio.h>

+#include <stdlib.h>

+#include <string.h>

+#include <getopt.h>

+#include <sys/mman.h>

+#include <fcntl.h>

+#include <errno.h>

+#include <dirent.h>

+#include <unistd.h>

+

+#include "zxdh_rawdev.h"

+#include "zxdh_pci.h"

+

+#define PCI_DEVICES_DIR          "/sys/bus/pci/devices"

+

+#define BAR0_IDX                 (0)

+#define BAR2_IDX                 (2)

+

+#define IORESOURCE_MEM           (0x00000200)

+#define PCI_RESOURCE_FMT_NVAL    (3)

+#define PCI_FMT_NVAL             (4)

+#define FILE_FMT_NVAL            (2)

+

+#define STR_BUFF_LEN             (128)

+

+#define BYTES_NO_SWAP            (0)

+#define BYTES_SWAP               (1)

+

+#define PCI_CMD_OFFSET           (0x04)

+#define PCI_CMD_BYTES            (2)

+#define PCI_CMD_MSE_BIT          (1)

+#define FPGA_VER_OFFSET          (0x420)

+#define FPGA_VER_BYTES           (4)

+#define BOM_ID_OFFSET            (0x424)

+#define BOM_ID_BYTES             (1)

+#define FPGA_PR_FLAG_OFFSET      (0x425)

+#define FPGA_PR_FLAG_BYTES       (1)

+#define BOARD_ID_OFFSET          (0x426)

+#define BOARD_ID_BYTES           (2)

+#define FPGA_MAKE_TIME_OFFSET    (0x428)

+#define FPGA_MAKE_TIME_BYTES     (4)

+

+#define PARA_PR_FLAG             (0)

+#define PARA_FPGA_VER            (1)

+#define PARA_FPGA_MAKE_TIME      (2)

+#define PARA_BOARD_ID            (3)

+#define PARA_BOM_ID              (4)

+#define PARA_PCI_CMD             (5)

+

+#define PCI_READ                 (0)

+#define PCI_WRITE                (1)

+

+struct zxdh_pci_dev gdev;

+

+static int

+zxdh_gdma_rw_pci_config(struct zxdh_pci_dev *dev, uint8_t rw, uint offset, uint count, uint8_t *buf)

+{

+ int fd = -1;

+ uint res = 0;

+ int ret = -1;

+ char filename[FILE_PATH_LEN] = {0};

+

+ snprintf(filename, sizeof(filename), "/proc/bus/pci/%02x/%02x.%d",

+ dev->bus, dev->devid, dev->function);

+ fd = open(filename, O_RDWR);

+ if (fd < 0) {

+ snprintf(filename, sizeof(filename), "/proc/bus/pci/%04x:%02x/%02x.%d",

+ dev->domain, dev->bus, dev->devid, dev->function);

+ fd = open(filename, O_RDWR);

+ if (fd < 0) {

+ ZXDH_PMD_LOG(ERR, "Failed to open file:%s, fd:%d!", filename, fd);

+ return -1;

+ }

+ }

+

+ res = lseek(fd, offset, SEEK_SET);

+ if (res != offset) {

+ close(fd);

+ ZXDH_PMD_LOG(ERR, "Failed to lseek pci, res:%u!", res);

+ return -1;

+ }

+

+ if (rw == PCI_READ)

+ ret = read(fd, buf, count);

+ else

+ ret = write(fd, buf, count);

+

+ if (ret < 0) {

+ close(fd);

+ ZXDH_PMD_LOG(ERR, "Failed to rw pci:%d, ret:%d!", rw, ret);

+ return -1;

+ }

+

+ close(fd);

+ return 0;

+}

+

+static int

+zxdh_gdma_cfg_space_read(struct zxdh_pci_dev *dev, uint8_t ParaType, uint *pParaVer)

+{

+ int ret = 0;

+ uint8_t aRegVal[sizeof(uint)] = {0};

+ uint8_t ucLoop = 0;

+ uint8_t ucSwap = BYTES_NO_SWAP;

+ uint dwRegOffset = 0;

+ uint dwRegLen = 0;

+

+ if ((dev == NULL) || (pParaVer == NULL)) {

+ ZXDH_PMD_LOG(ERR, "Param is invalid!");

+ return -EINVAL;

+ }

+

+ switch (ParaType) {

+ case PARA_PR_FLAG:

+ dwRegOffset = FPGA_PR_FLAG_OFFSET;

+ dwRegLen    = FPGA_PR_FLAG_BYTES;

+ ucSwap      = BYTES_NO_SWAP;

+ break;

+ case PARA_FPGA_VER:

+ dwRegOffset = FPGA_VER_OFFSET;

+ dwRegLen    = FPGA_VER_BYTES;

+ ucSwap      = BYTES_NO_SWAP;

+ break;

+ case PARA_FPGA_MAKE_TIME:

+ dwRegOffset = FPGA_MAKE_TIME_OFFSET;

+ dwRegLen    = FPGA_MAKE_TIME_BYTES;

+ ucSwap      = BYTES_NO_SWAP;

+ break;

+ case PARA_BOARD_ID:

+ dwRegOffset = BOARD_ID_OFFSET;

+ dwRegLen    = BOARD_ID_BYTES;

+ ucSwap      = BYTES_NO_SWAP;

+ break;

+ case PARA_BOM_ID:

+ dwRegOffset = BOM_ID_OFFSET;

+ dwRegLen    = BOM_ID_BYTES;

+ ucSwap      = BYTES_NO_SWAP;

+ break;

+ case PARA_PCI_CMD:

+ dwRegOffset = PCI_CMD_OFFSET;

+ dwRegLen    = PCI_CMD_BYTES;

+ ucSwap      = BYTES_SWAP;

+ break;

+ default:

+ ZXDH_PMD_LOG(ERR, "ParaType %u not support!", ParaType);

+ return -EINVAL;

+ }

+

+ if (dwRegLen > sizeof(uint)) {

+ ZXDH_PMD_LOG(ERR, "dwRegLen %u is invalid", dwRegLen);

+ return -1;

+ }

+

+ *pParaVer = 0;

+ ret = zxdh_gdma_rw_pci_config(dev, PCI_READ, dwRegOffset, dwRegLen, aRegVal);

+ if (ret != 0) {

+ ZXDH_PMD_LOG(ERR, "ParaType %u, zxdh_gdma_rw_pci_config failed!", ParaType);

+ return ret;

+ }

+

+ if (ucSwap == BYTES_SWAP) {

+ for (ucLoop = 0; ucLoop < dwRegLen; ucLoop++)

+ *pParaVer = (*pParaVer << 8) | aRegVal[dwRegLen-1-ucLoop];

+ } else {

+ for (ucLoop = 0; ucLoop < dwRegLen; ucLoop++)

+ *pParaVer = (*pParaVer << 8) | aRegVal[ucLoop];

+ }

+

+ return ret;

+}

+

+static int

+zxdh_gdma_cfg_space_write(struct zxdh_pci_dev *dev, uint8_t ParaType, uint *pParaVer)

+{

+ int ret = 0;

+ uint8_t aRegVal[sizeof(uint)] = {0};

+ uint8_t ucLoop = 0;

+ uint8_t ucSwap = BYTES_NO_SWAP;

+ uint dwRegOffset = 0;

+ uint dwRegLen = 0;

+

+ if ((dev == NULL) || (pParaVer == NULL)) {

+ ZXDH_PMD_LOG(ERR, "Param is invalid");

+ return -EINVAL;

+ }

+

+ if (ParaType != PARA_PCI_CMD) {

+ ZXDH_PMD_LOG(ERR, "ParaType %u not support!", ParaType);

+ return -EINVAL;

+ }

+

+ dwRegOffset = PCI_CMD_OFFSET;

+ dwRegLen = PCI_CMD_BYTES;

+ ucSwap = BYTES_SWAP;

+

+ if (dwRegLen > sizeof(uint)) {

+ ZXDH_PMD_LOG(ERR, "dwRegLen %u is invalid", dwRegLen);

+ return -1;

+ }

+

+ if (ucSwap == BYTES_SWAP) {

+ for (ucLoop = 0; ucLoop < dwRegLen; ucLoop++)

+ aRegVal[ucLoop] = (*pParaVer >> 8*ucLoop) & 0xff;

+ } else {

+ for (ucLoop = 0; ucLoop < dwRegLen; ucLoop++)

+ aRegVal[ucLoop] = (*pParaVer >> 8*(dwRegLen-1-ucLoop)) & 0xff;

+ }

+

+ ret = zxdh_gdma_rw_pci_config(dev, PCI_WRITE, dwRegOffset, dwRegLen, aRegVal);

+ if (ret != 0) {

+ ZXDH_PMD_LOG(ERR, "ParaType %u, zxdh_gdma_rw_pci_config failed!", ParaType);

+ return ret;

+ }

+

+ return ret;

+}

+

+static int

+zxdh_gdma_str_split(char *string, int stringlen, char **tokens, int maxtokens, char delim)

+{

+ int loop = 0;

+ int tok = 0;

+ int tokstart = 1; /* first token is right at start of string */

+

+ if (string == NULL || tokens == NULL) {

+ ZXDH_PMD_LOG(ERR, "Param is invalid!");

+ return -1;

+ }

+

+ for (loop = 0; loop < stringlen; loop++) {

+ if (string[loop] == '\0' || tok >= maxtokens)

+ break;

+

+ if (tokstart) {

+ tokstart = 0;

+ tokens[tok++] = &string[loop];

+ }

+

+ if (string[loop] == delim) {

+ string[loop] = '\0';

+ tokstart = 1;

+ }

+ }

+

+ return tok;

+}

+

+static int

+zxdh_gdma_devfs_parse(const char *filename, unsigned long *val)

+{

+ FILE *f = NULL;

+ char *end = NULL;

+ char buf[STR_BUFF_LEN] = {0};

+

+ f = fopen(filename, "r");

+ if (f == NULL) {

+ ZXDH_PMD_LOG(ERR, "Cannot open sysfs %s", filename);

+ return -1;

+ }

+

+ if (fgets(buf, sizeof(buf), f) == NULL) {

+ ZXDH_PMD_LOG(ERR, "Cannot read sysfs value %s", filename);

+ fclose(f);

+ return -1;

+ }

+

+ *val = strtoul(buf, &end, 0);

+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {

+ ZXDH_PMD_LOG(ERR, "Cannot parse sysfs value %s", filename);

+ fclose(f);

+ return -1;

+ }

+

+ fclose(f);

+ return 0;

+}

+

+static int

+zxdh_gdma_resfs_parse(const char *filename, struct zxdh_pci_dev *dev)

+{

+ FILE *fp = NULL;

+ char buf[STR_BUFF_LEN] = {0};

+ uint8_t  loop = 0;

+ uint64_t phys_addr = 0;

+ uint64_t end_addr = 0;

+ uint64_t flags = 0;

+ int ret = 0;

+ union pci_resource_info {

+ struct {

+ char *phys_addr;

+ char *end_addr;

+ char *flags;

+ };

+ char *ptrs[PCI_RESOURCE_FMT_NVAL];

+ } res_info;

+

+ fp = fopen(filename, "r");

+ if (fp == NULL) {

+ ZXDH_PMD_LOG(ERR, "Failed to open file %s", filename);

+ return -1;

+ }

+

+ for (loop = 0; loop < PCI_MAX_RESOURCE; loop++) {

+ if (fgets(buf, sizeof(buf), fp) == NULL) {

+ ZXDH_PMD_LOG(ERR, "Failed to read file %s", filename);

+ goto err_exit;

+ }

+

+ ret = zxdh_gdma_str_split(buf, sizeof(buf), res_info.ptrs,

+ PCI_RESOURCE_FMT_NVAL, ' ');

+ if (ret != PCI_RESOURCE_FMT_NVAL) {

+ ZXDH_PMD_LOG(ERR, "file %s:zxdh_gdma_str_split failed!", filename);

+ goto err_exit;

+ }

+ errno = 0;

+ phys_addr = strtoull(res_info.phys_addr, NULL, 16);

+ end_addr  = strtoull(res_info.end_addr, NULL, 16);

+ flags     = strtoull(res_info.flags, NULL, 16);

+

+ if (errno != 0) {

+ ZXDH_PMD_LOG(ERR, "file %s:bad resource format!", filename);

+ goto err_exit;

+ }

+

+ if (flags & IORESOURCE_MEM) {

+ if (loop == BAR0_IDX) {

+ dev->bar_pa[BAR0_IDX] = phys_addr;

+ dev->bar_len[BAR0_IDX] = end_addr - phys_addr + 1;

+ }

+

+ if (loop == BAR2_IDX) {

+ dev->bar_pa[BAR2_IDX] = phys_addr;

+ dev->bar_len[BAR2_IDX] = end_addr - phys_addr + 1;

+ fclose(fp);

+ return 0;

+ }

+ }

+ }

+

+ ZXDH_PMD_LOG(ERR, "file %s: Not found IO resource memory!", filename);

+

+err_exit:

+ fclose(fp);

+ return -1;

+}

+

+static int

+zxdh_gdma_pci_addr_parse(const char *buf, int buf_size, struct zxdh_pci_dev *dev)

+{

+ char *buf_copy = NULL;

+ int ret = 0;

+ union splitaddr {

+ struct {

+ char *domain;

+ char *bus;

+ char *devid;

+ char *function;

+ };

+ char *str[PCI_FMT_NVAL];

+ } splitaddr;

+

+ buf_copy = strndup(buf, buf_size);

+ if (buf_copy == NULL) {

+ ZXDH_PMD_LOG(ERR, "buf %s: strndup failed!", buf);

+ return -1;

+ }

+

+ /* first split on ':' */

+ ret = zxdh_gdma_str_split(buf_copy, buf_size, splitaddr.str, PCI_FMT_NVAL, ':');

+ if (ret != (PCI_FMT_NVAL - 1)) {

+ ZXDH_PMD_LOG(ERR, "buf %s: zxdh_gdma_str_split failed!", buf);

+ goto err_exit;

+ }

+

+ /* final split is on '.' between devid and function */

+ splitaddr.function = strchr(splitaddr.devid, '.');

+ if (splitaddr.function == NULL) {

+ ZXDH_PMD_LOG(ERR, "buf %s: strchr failed!", buf);

+ goto err_exit;

+ }

+ *splitaddr.function++ = '\0';

+

+ /* now convert to int values */

+ errno = 0;

+ dev->domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);

+ dev->bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);

+ dev->devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);

+ dev->function = (uint8_t)strtoul(splitaddr.function, NULL, 10);

+ if (errno != 0) {

+ ZXDH_PMD_LOG(ERR, "buf %s: bad format!", buf);

+ goto err_exit;

+ }

+ free(buf_copy);

+ return 0;

+

+err_exit:

+ free(buf_copy);

+ return -1;

+}

+

+static int

+zxdh_gdma_pci_dev_mmap(const char *filename, struct zxdh_pci_dev *dev, uint8_t bar_idx)

+{

+ int fd = -1;

+

+ if (dev->bar_va[bar_idx] == NULL) {

+ fd = open(filename, O_RDWR);

+ if (fd < 0) {

+ ZXDH_PMD_LOG(ERR, "Failed to open file %s", filename);

+ return -1;

+ }

+

+ dev->bar_va[bar_idx] = mmap((void *)dev->bar_pa[bar_idx],

+ dev->bar_len[bar_idx],

+ PROT_READ | PROT_WRITE,

+ MAP_SHARED, fd, 0);

+

+ if (dev->bar_va[bar_idx] == MAP_FAILED) {

+ ZXDH_PMD_LOG(ERR, "Failed to mmap file %s!", filename);

+ goto err_exit;

+ }

+ close(fd);

+ } else

+ ZXDH_PMD_LOG(ERR, "BarVirtAddr is not NULL!");

+

+ return 0;

+

+err_exit:

+ close(fd);

+ return -1;

+}

+

+void

+zxdh_gdma_pci_dev_munmap(void)

+{

+ if (gdev.bar_va[BAR0_IDX] != NULL) {

+ munmap(gdev.bar_va[BAR0_IDX], gdev.bar_len[BAR0_IDX]);

+ gdev.bar_va[BAR0_IDX] = NULL;

+ }

+

+ if (gdev.bar_va[BAR2_IDX] != NULL) {

+ munmap(gdev.bar_va[BAR2_IDX], gdev.bar_len[BAR2_IDX]);

+ gdev.bar_va[BAR2_IDX] = NULL;

+ }

+}

+

+static int

+zxdh_gdma_pci_mse_en(struct zxdh_pci_dev *dev)

+{

+ int ret = 0;

+ uint RegVal = 0;

+

+ ret = zxdh_gdma_cfg_space_read(dev, PARA_PCI_CMD, &RegVal);

+ if (ret != 0) {

+ ZXDH_PMD_LOG(ERR, "Failed to read %04x:%02x:%02x.%01x pci config space!",

+ dev->domain, dev->bus, dev->devid, dev->function);

+ return ret;

+ }

+

+ if ((RegVal & (1 << PCI_CMD_MSE_BIT)) == 0) {

+ RegVal = RegVal | (1 << PCI_CMD_MSE_BIT);

+

+ ret = zxdh_gdma_cfg_space_write(dev, PARA_PCI_CMD, &RegVal);

+ if (ret != 0) {

+ ZXDH_PMD_LOG(ERR, "Failed to write %04x:%02x:%02x.%01x pci config space!",

+ dev->domain, dev->bus,

+ dev->devid, dev->function);

+ return ret;

+ }

+ }

+

+ return ret;

+}

+

+int

+zxdh_gdma_pci_scan(void)

+{

+ struct dirent *e = NULL;

+ DIR *dir = NULL;

+ char dirname[FILE_PATH_LEN] = {0};

+ char filename[FILE_PATH_LEN] = {0};

+ uint16_t vendor_id = 0;

+ uint16_t device_id = 0;

+ unsigned long tmp = 0;

+ bool found = false;

+ int ret = 0;

+

+ dir = opendir(PCI_DEVICES_DIR);

+ if (dir == NULL) {

+ ZXDH_PMD_LOG(ERR, "Failed to opendir %s", PCI_DEVICES_DIR);

+ return -1;

+ }

+

+ while ((e = readdir(dir)) != NULL) {

+ if (e->d_name[0] == '.')

+ continue;

+

+ memset(dirname, 0, FILE_PATH_LEN);

+ snprintf(dirname, FILE_PATH_LEN, "%s/%s", PCI_DEVICES_DIR, e->d_name);

+

+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);

+ ret = zxdh_gdma_devfs_parse(filename, &tmp);

+ if (ret != 0)

+ goto out;

+

+ vendor_id = (uint16_t)tmp;

+

+ snprintf(filename, sizeof(filename), "%s/device", dirname);

+ ret = zxdh_gdma_devfs_parse(filename, &tmp);

+ if (ret != 0)

+ goto out;

+

+ device_id = (uint16_t)tmp;

+

+ if ((vendor_id == ZXDH_GDMA_VENDORID) && (device_id == ZXDH_GDMA_DEVICEID)) {

+ found = true;

+ break;

+ }

+ }

+

+ if (found != true) {

+ ZXDH_PMD_LOG(ERR, "Failed to find gdma pci dev");

+ ret = -1;

+ goto out;

+ }

+

+ gdev.vendor_id = vendor_id;

+ gdev.device_id = device_id;

+ memcpy(gdev.d_name, e->d_name, PCI_BUFF_LEN);

+ memcpy(gdev.dirname, dirname, FILE_PATH_LEN);

+ ZXDH_PMD_LOG(INFO, "Found gdma pci dev %s", e->d_name);

+

+ /* Parse pci addr */

+ ret = zxdh_gdma_pci_addr_parse(e->d_name, sizeof(e->d_name), &gdev);

+ if (ret != 0)

+ goto out;

+

+ /* Enable MSE */

+ ret = zxdh_gdma_pci_mse_en(&gdev);

+ if (ret != 0)

+ goto out;

+

+ /* Get bar0 phyaddr and len */

+ snprintf(filename, sizeof(filename), "%s/resource", dirname);

+ ret = zxdh_gdma_resfs_parse(filename, &gdev);

+ if (ret != 0)

+ goto out;

+

+ /* Mmap bar0 virtaddr */

+ snprintf(filename, sizeof(filename), "%s/resource0", dirname);

+ ret = zxdh_gdma_pci_dev_mmap(filename, &gdev, BAR0_IDX);

+ if (ret != 0)

+ goto out;

+

+ ZXDH_PMD_LOG(INFO, "Gdma pci scan success");

+

+out:

+ closedir(dir);

+ return ret;

+}

diff --git a/drivers/raw/zxdh/zxdh_pci.h b/drivers/raw/zxdh/zxdh_pci.h

new file mode 100644

index 0000000000..93c6c97405

--- /dev/null

+++ b/drivers/raw/zxdh/zxdh_pci.h

@@ -0,0 +1,39 @@

+/* SPDX-License-Identifier: BSD-3-Clause

+ * Copyright 2024 ZTE Corporation

+ */

+

+#ifndef __ZXDH_PCI_H__

+#define __ZXDH_PCI_H__

+

+#ifdef __cplusplus

+extern "C" {

+#endif

+

+#define FILE_PATH_LEN                       (100)

+#define PCI_BUFF_LEN                        (16)

+#define PCI_MAX_RESOURCE                    (6)

+

+struct zxdh_pci_dev {

+ uint16_t    vendor_id;

+ uint16_t    device_id;

+ uint16_t    domain;

+ uint8_t     bus;

+ uint8_t     devid;

+ uint8_t     function;

+ char        dirname[FILE_PATH_LEN];

+ char        d_name[PCI_BUFF_LEN];

+ void       *bar_va[PCI_MAX_RESOURCE];

+ uint64_t    bar_pa[PCI_MAX_RESOURCE];

+ uint64_t    bar_len[PCI_MAX_RESOURCE];

+};

+

+extern struct zxdh_pci_dev gdev;

+

+void zxdh_gdma_pci_dev_munmap(void);

+int zxdh_gdma_pci_scan(void);

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __ZXDH_PCI_H__ */


diff --git a/drivers/raw/zxdh/zxdh_rawdev.c b/drivers/raw/zxdh/zxdh_rawdev.c

new file mode 100644

index 0000000000..32017f8744

--- /dev/null

+++ b/drivers/raw/zxdh/zxdh_rawdev.c

@@ -0,0 +1,915 @@

+/* SPDX-License-Identifier: BSD-3-Clause

+ * Copyright 2024 ZTE Corporation

+ */

+

+#include <assert.h>

+#include <stdio.h>

+#include <stdbool.h>

+#include <errno.h>

+#include <stdint.h>

+#include <inttypes.h>

+#include <string.h>

+#include <time.h>

+#include <sys/types.h>

+

+#include <rte_byteorder.h>

+#include <rte_version.h>

+#include <rte_errno.h>

+#include <rte_common.h>

+#include <rte_debug.h>

+#include <rte_dev.h>

+#include <rte_eal.h>

+#include <rte_kvargs.h>

+#include <rte_log.h>

+#include <rte_malloc.h>

+#include <rte_memory.h>

+#include <rte_memcpy.h>

+#include <rte_lcore.h>

+#include <rte_cycles.h>

+#include <rte_memzone.h>

+#include <rte_atomic.h>

+#include <rte_rawdev.h>

+#include <rte_rawdev_pmd.h>

+

+#if RTE_VERSION_NUM(22, 11, 0, 0) <= RTE_VERSION

+#include <bus_vdev_driver.h>

+#else

+#include <rte_bus_vdev.h>

+#endif

+

+#include "zxdh_rawdev.h"

+#include "zxdh_pci.h"

+

+/*

+ * User define:

+ * ep_id-bit[15:12] vfunc_num-bit[11:4] func_num-bit[3:1] vfunc_active-bit0

+ * host ep_id:5~8   zf ep_id:9

+ */
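
+/*

+ * Example (illustrative): sportid 0, spfid 0, svfid 0 encodes to

+ * src_user = ((0 + 5) << 12) = 0x5000, i.e. host ep_id 5, pf 0, vf inactive.

+ */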

+#define ZXDH_GDMA_ZF_USER                       0x9000      /* ep4 pf0 */

+#define ZXDH_GDMA_PF_NUM_SHIFT                  1

+#define ZXDH_GDMA_VF_NUM_SHIFT                  4

+#define ZXDH_GDMA_EP_ID_SHIFT                   12

+#define ZXDH_GDMA_VF_EN                         1

+#define ZXDH_GDMA_EPID_OFFSET                   5

+

+/* Register offset */

+#define ZXDH_GDMA_BASE_OFFSET                   0x100000

+#define ZXDH_GDMA_EXT_ADDR_OFFSET               0x218

+#define ZXDH_GDMA_SAR_LOW_OFFSET                0x200

+#define ZXDH_GDMA_DAR_LOW_OFFSET                0x204

+#define ZXDH_GDMA_SAR_HIGH_OFFSET               0x234

+#define ZXDH_GDMA_DAR_HIGH_OFFSET               0x238

+#define ZXDH_GDMA_XFERSIZE_OFFSET               0x208

+#define ZXDH_GDMA_CONTROL_OFFSET                0x230

+#define ZXDH_GDMA_TC_STATUS_OFFSET              0x0

+#define ZXDH_GDMA_STATUS_CLEAN_OFFSET           0x80

+#define ZXDH_GDMA_LLI_L_OFFSET                  0x21c

+#define ZXDH_GDMA_LLI_H_OFFSET                  0x220

+#define ZXDH_GDMA_CHAN_CONTINUE_OFFSET          0x224

+#define ZXDH_GDMA_TC_CNT_OFFSET                 0x23c

+#define ZXDH_GDMA_LLI_USER_OFFSET               0x228

+

+/* Control register */

+#define ZXDH_GDMA_CHAN_ENABLE                   0x1

+#define ZXDH_GDMA_CHAN_DISABLE                  0

+#define ZXDH_GDMA_SOFT_CHAN                     0x2

+#define ZXDH_GDMA_TC_INTR_ENABLE                0x10

+#define ZXDH_GDMA_ALL_INTR_ENABLE               0x30

+#define ZXDH_GDMA_SBS_SHIFT                     6           /* src burst size */

+#define ZXDH_GDMA_SBL_SHIFT                     9           /* src burst length */

+#define ZXDH_GDMA_DBS_SHIFT                     13          /* dest burst size */

+#define ZXDH_GDMA_BURST_SIZE_MIN                0x1         /* 1 byte */

+#define ZXDH_GDMA_BURST_SIZE_MEDIUM             0x4         /* 4 word */

+#define ZXDH_GDMA_BURST_SIZE_MAX                0x6         /* 16 word */

+#define ZXDH_GDMA_DEFAULT_BURST_LEN             0xf         /* 16 beats */

+#define ZXDH_GDMA_TC_CNT_ENABLE                 (1 << 27)

+#define ZXDH_GDMA_CHAN_FORCE_CLOSE              (1 << 31)

+

+/* TC count & Error interrupt status register */

+#define ZXDH_GDMA_SRC_LLI_ERR                   (1 << 16)

+#define ZXDH_GDMA_SRC_DATA_ERR                  (1 << 17)

+#define ZXDH_GDMA_DST_ADDR_ERR                  (1 << 18)

+#define ZXDH_GDMA_ERR_STATUS                    (1 << 19)

+#define ZXDH_GDMA_ERR_INTR_ENABLE               (1 << 20)

+#define ZXDH_GDMA_TC_CNT_CLEAN                  (1)

+

+#define ZXDH_GDMA_CHAN_SHIFT                    0x80

+#define ZXDH_GDMA_LINK_END_NODE                 (1 << 30)

+#define ZXDH_GDMA_CHAN_CONTINUE                 (1)

+

+#define LOW32_MASK                              0xffffffff

+#define LOW16_MASK                              0xffff

+

+#define ZXDH_GDMA_WAIT_TIMES_MAX                100

+#define ZXDH_GDMA_TC_CNT_MAX                    0x10000

+

+#define IDX_TO_ADDR(addr, idx, t) \

+ ((t)((uint8_t *)(addr) + (idx) * sizeof(struct zxdh_gdma_buff_desc)))

+

+static int zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id);

+static int zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id);

+

+char zxdh_gdma_driver_name[] = "rawdev_zxdh_gdma";

+char dev_name[] = "zxdh_gdma";

+

+static inline struct zxdh_gdma_queue *

+zxdh_gdma_get_queue(struct rte_rawdev *dev, uint16_t queue_id)

+{

+ struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+

+ if (queue_id >= ZXDH_GDMA_TOTAL_CHAN_NUM) {

+ ZXDH_PMD_LOG(ERR, "queue id %d is invalid", queue_id);

+ return NULL;

+ }

+

+ return &(gdmadev->vqs[queue_id]);

+}

+

+uint

+zxdh_gdma_read_reg(struct rte_rawdev *dev, uint16_t queue_id, uint offset)

+{

+ struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ uint addr = 0;

+ uint val = 0;

+

+ addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;

+ val = *(uint *)(gdmadev->base_addr + addr);

+

+ return val;

+}

+

+void

+zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t queue_id, uint offset, uint val)

+{

+ struct zxdh_gdma_rawdev *gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ uint addr = 0;

+

+ addr = offset + queue_id * ZXDH_GDMA_CHAN_SHIFT;

+ *(uint *)(gdmadev->base_addr + addr) = val;

+}

+

+int

+zxdh_gdma_debug_info_dump(struct rte_rawdev *dev, uint16_t queue_id)

+{

+ struct zxdh_gdma_queue *queue = NULL;

+ struct zxdh_gdma_buff_desc *bd = NULL;

+ struct zxdh_gdma_job *job = NULL;

+ uint16_t i = 0;

+

+ if (dev == NULL)

+ return -EINVAL;

+

+ queue = zxdh_gdma_get_queue(dev, queue_id);

+ if (queue == NULL)

+ return -EINVAL;

+

+ ZXDH_PMD_LOG(INFO, "###dump sw_ring info###");

+ ZXDH_PMD_LOG(INFO, "free_cnt:%u deq_cnt:%u",

+ queue->sw_ring.free_cnt, queue->sw_ring.deq_cnt);

+ ZXDH_PMD_LOG(INFO, "enq_idx:%u deq_idx:%u used_idx:%u",

+ queue->sw_ring.enq_idx, queue->sw_ring.deq_idx,

+ queue->sw_ring.used_idx);

+ for (i = 0; i < ZXDH_GDMA_QUEUE_SIZE; i++) {

+ if (queue->sw_ring.job[i] != NULL) {

+ job = queue->sw_ring.job[i];

+ ZXDH_PMD_LOG(INFO, "idx:%d, SrcAddr:0x%"PRIx64" DstAddr:0x%"PRIx64" len:%u",

+ i, job->src, job->dest, job->len);

+ }

+ }

+

+ ZXDH_PMD_LOG(INFO, "###dump ring info###");

+ ZXDH_PMD_LOG(INFO, "avail_idx:%u tc_cnt:%u", queue->ring.avail_idx, queue->tc_cnt);

+ for (i = 0; i < ZXDH_GDMA_RING_SIZE; i++) {

+ bd = IDX_TO_ADDR(queue->ring.desc, i, struct zxdh_gdma_buff_desc*);

+ ZXDH_PMD_LOG(INFO, "idx:%d Src:0x%"PRIx64" Dst:0x%"PRIx64" LLI_L:0x%x LLI_H:0x%x ctrl:0x%x user:0x%x",

+ i, bd->SrcAddr_L | ((uint64_t)bd->SrcAddr_H << 32),

+ bd->DstAddr_L | ((uint64_t)bd->DstAddr_H << 32),

+ bd->LLI_Addr_L, bd->LLI_Addr_H, bd->Control, bd->ExtAddr);

+ }

+

+ return 0;

+}

+

+static int

+zxdh_gdma_rawdev_info_get(struct rte_rawdev *dev,

+   __rte_unused rte_rawdev_obj_t dev_info,

+   __rte_unused size_t dev_info_size)

+{

+ if (dev == NULL)

+ return -EINVAL;

+

+ return 0;

+}

+

+static int

+zxdh_gdma_rawdev_configure(const struct rte_rawdev *dev,

+    rte_rawdev_obj_t config,

+    size_t config_size)

+{

+ struct zxdh_gdma_config *gdma_config = NULL;

+

+ if ((dev == NULL) ||

+ (config == NULL) ||

+ (config_size != sizeof(struct zxdh_gdma_config)))

+ return -EINVAL;

+

+ gdma_config = (struct zxdh_gdma_config *)config;

+ if (gdma_config->max_vqs != ZXDH_GDMA_TOTAL_CHAN_NUM) {

+ ZXDH_PMD_LOG(ERR, "gdma only support queue num %d", ZXDH_GDMA_TOTAL_CHAN_NUM);

+ return -EINVAL;

+ }

+

+ return 0;

+}

+

+static int

+zxdh_gdma_rawdev_start(struct rte_rawdev *dev)

+{

+ struct zxdh_gdma_rawdev *gdmadev = NULL;

+

+ if (dev == NULL)

+ return -EINVAL;

+

+ gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ gdmadev->device_state = ZXDH_GDMA_DEV_RUNNING;

+

+ return 0;

+}

+

+static void

+zxdh_gdma_rawdev_stop(struct rte_rawdev *dev)

+{

+ struct zxdh_gdma_rawdev *gdmadev = NULL;

+

+ if (dev == NULL)

+ return;

+

+ gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;

+}

+

+static int

+zxdh_gdma_rawdev_reset(struct rte_rawdev *dev)

+{

+ if (dev == NULL)

+ return -EINVAL;

+

+ return 0;

+}

+

+static int

+zxdh_gdma_rawdev_close(struct rte_rawdev *dev)

+{

+ struct zxdh_gdma_rawdev *gdmadev = NULL;

+ struct zxdh_gdma_queue *queue = NULL;

+ uint16_t queue_id = 0;

+

+ if (dev == NULL)

+ return -EINVAL;

+

+ for (queue_id = 0; queue_id < ZXDH_GDMA_TOTAL_CHAN_NUM; queue_id++) {

+ queue = zxdh_gdma_get_queue(dev, queue_id);

+ if ((queue == NULL) || (queue->enable == 0))

+ continue;

+

+ zxdh_gdma_queue_free(dev, queue_id);

+ }

+ gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;

+

+ return 0;

+}

+

+static int

+zxdh_gdma_rawdev_queue_setup(struct rte_rawdev *dev,

+ uint16_t queue_id,

+ rte_rawdev_obj_t queue_conf,

+ size_t conf_size)

+{

+ struct zxdh_gdma_rawdev *gdmadev = NULL;

+ struct zxdh_gdma_queue *queue = NULL;

+ struct zxdh_gdma_queue_config *qconfig = NULL;

+ struct zxdh_gdma_rbp *rbp = NULL;

+ uint16_t i = 0;

+ uint8_t is_txq = 0;

+ uint src_user = 0;

+ uint dst_user = 0;

+

+ if (dev == NULL)

+ return -EINVAL;

+

+ if ((queue_conf == NULL) || (conf_size != sizeof(struct zxdh_gdma_queue_config)))

+ return -EINVAL;

+

+ gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ qconfig = (struct zxdh_gdma_queue_config *)queue_conf;

+

+ for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {

+ if (gdmadev->vqs[i].enable == 0)

+ break;

+ }

+ if (i >= ZXDH_GDMA_TOTAL_CHAN_NUM) {

+ ZXDH_PMD_LOG(ERR, "Failed to setup queue, no avail queues");

+ return -1;

+ }

+ queue_id = i;

+ if (zxdh_gdma_queue_init(dev, queue_id) != 0) {

+ ZXDH_PMD_LOG(ERR, "Failed to init queue");

+ return -1;

+ }

+ queue = &(gdmadev->vqs[queue_id]);

+

+ rbp = qconfig->rbp;

+ if ((rbp->srbp != 0) && (rbp->drbp == 0)) {

+ is_txq = 0;

+ dst_user = ZXDH_GDMA_ZF_USER;

+ src_user = ((rbp->spfid << ZXDH_GDMA_PF_NUM_SHIFT) |

+ ((rbp->sportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

+

+ if (rbp->svfid != 0)

+ src_user |= (ZXDH_GDMA_VF_EN |

+ ((rbp->svfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

+

+ ZXDH_PMD_LOG(DEBUG, "rxq->qidx:%d setup src_user(ep:%d pf:%d vf:%d) success",

+ queue_id, (uint8_t)rbp->sportid, (uint8_t)rbp->spfid,

+ (uint8_t)rbp->svfid);

+ } else if ((rbp->srbp == 0) && (rbp->drbp != 0)) {

+ is_txq = 1;

+ src_user = ZXDH_GDMA_ZF_USER;

+ dst_user = ((rbp->dpfid << ZXDH_GDMA_PF_NUM_SHIFT) |

+ ((rbp->dportid + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

+

+ if (rbp->dvfid != 0)

+ dst_user |= (ZXDH_GDMA_VF_EN |

+ ((rbp->dvfid - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

+

+ ZXDH_PMD_LOG(DEBUG, "txq->qidx:%d setup dst_user(ep:%d pf:%d vf:%d) success",

+ queue_id, (uint8_t)rbp->dportid, (uint8_t)rbp->dpfid,

+ (uint8_t)rbp->dvfid);

+ } else {

+ ZXDH_PMD_LOG(ERR, "Failed to setup queue, srbp/drbp is invalid");

+ return -EINVAL;

+ }

+ queue->is_txq = is_txq;

+

+ /* setup queue user info */

+ queue->user = (src_user & LOW16_MASK) | (dst_user << 16);

+

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_EXT_ADDR_OFFSET, queue->user);

+ gdmadev->used_num++;

+

+ return queue_id;

+}

+

+static int

+zxdh_gdma_rawdev_queue_release(struct rte_rawdev *dev, uint16_t queue_id)

+{

+ struct zxdh_gdma_queue *queue = NULL;

+

+ if (dev == NULL)

+ return -EINVAL;

+

+ queue = zxdh_gdma_get_queue(dev, queue_id);

+ if ((queue == NULL) || (queue->enable == 0))

+ return -EINVAL;

+

+ zxdh_gdma_queue_free(dev, queue_id);

+

+ return 0;

+}

+

+static int

+zxdh_gdma_rawdev_get_attr(struct rte_rawdev *dev,

+   __rte_unused const char *attr_name,

+   uint64_t *attr_value)

+{

+ struct zxdh_gdma_rawdev *gdmadev = NULL;

+ struct zxdh_gdma_attr *gdma_attr = NULL;

+

+ if ((dev == NULL) || (attr_value == NULL))

+ return -EINVAL;

+

+ gdmadev   = zxdh_gdma_rawdev_get_priv(dev);

+ gdma_attr = (struct zxdh_gdma_attr *)attr_value;

+ gdma_attr->num_hw_queues = gdmadev->used_num;

+

+ return 0;

+}

+

+static inline void

+zxdh_gdma_control_cal(uint *val, uint8_t tc_enable)

+{

+ *val = (ZXDH_GDMA_CHAN_ENABLE |

+ ZXDH_GDMA_SOFT_CHAN |

+ (ZXDH_GDMA_DEFAULT_BURST_LEN << ZXDH_GDMA_SBL_SHIFT) |

+ (ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_SBS_SHIFT) |

+ (ZXDH_GDMA_BURST_SIZE_MAX << ZXDH_GDMA_DBS_SHIFT));

+

+ if (tc_enable != 0)

+ *val |= ZXDH_GDMA_TC_CNT_ENABLE;

+}

+

+static inline uint

+zxdh_gdma_user_get(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)

+{

+ uint src_user = 0;

+ uint dst_user = 0;

+

+ if ((job->flags & ZXDH_GDMA_JOB_DIR_MASK) == 0) {

+ ZXDH_PMD_LOG(DEBUG, "job flags:0x%x default user:0x%x",

+ job->flags, queue->user);

+ return queue->user;

+ } else if ((job->flags & ZXDH_GDMA_JOB_DIR_TX) != 0) {

+ src_user = ZXDH_GDMA_ZF_USER;

+ dst_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |

+ ((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

+

+ if (job->vf_id != 0)

+ dst_user |= (ZXDH_GDMA_VF_EN |

+ ((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

+ } else {

+ dst_user = ZXDH_GDMA_ZF_USER;

+ src_user = ((job->pf_id << ZXDH_GDMA_PF_NUM_SHIFT) |

+ ((job->ep_id + ZXDH_GDMA_EPID_OFFSET) << ZXDH_GDMA_EP_ID_SHIFT));

+

+ if (job->vf_id != 0)

+ src_user |= (ZXDH_GDMA_VF_EN |

+ ((job->vf_id - 1) << ZXDH_GDMA_VF_NUM_SHIFT));

+ }

+ ZXDH_PMD_LOG(DEBUG, "job flags:0x%x ep_id:%u, pf_id:%u, vf_id:%u, user:0x%x",

+ job->flags, job->ep_id, job->pf_id, job->vf_id,

+ (src_user & LOW16_MASK) | (dst_user << 16));

+

+ return (src_user & LOW16_MASK) | (dst_user << 16);

+}

+

+static inline void

+zxdh_gdma_fill_bd(struct zxdh_gdma_queue *queue, struct zxdh_gdma_job *job)

+{

+ struct zxdh_gdma_buff_desc *bd = NULL;

+ uint val = 0;

+ uint64_t next_bd_addr = 0;

+ uint16_t avail_idx = 0;

+

+ avail_idx = queue->ring.avail_idx;

+ bd = &(queue->ring.desc[avail_idx]);

+ memset(bd, 0, sizeof(struct zxdh_gdma_buff_desc));

+

+ /* data bd */

+ if (job != NULL) {

+ zxdh_gdma_control_cal(&val, 1);

+ next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem,

+ (avail_idx + 1) % ZXDH_GDMA_RING_SIZE,

+ uint64_t);

+ bd->SrcAddr_L  = job->src & LOW32_MASK;

+ bd->DstAddr_L  = job->dest & LOW32_MASK;

+ bd->SrcAddr_H  = (job->src >> 32) & LOW32_MASK;

+ bd->DstAddr_H  = (job->dest >> 32) & LOW32_MASK;

+ bd->Xpara      = job->len;

+ bd->ExtAddr    = zxdh_gdma_user_get(queue, job);

+ bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;

+ bd->LLI_Addr_H = next_bd_addr >> 38;

+ bd->LLI_User   = ZXDH_GDMA_ZF_USER;

+ bd->Control    = val;

+ } else {

+ zxdh_gdma_control_cal(&val, 0);

+ next_bd_addr   = IDX_TO_ADDR(queue->ring.ring_mem, avail_idx, uint64_t);

+ bd->ExtAddr    = queue->user;

+ bd->LLI_User   = ZXDH_GDMA_ZF_USER;

+ bd->Control    = val;

+ bd->LLI_Addr_L = (next_bd_addr >> 6) & LOW32_MASK;

+ bd->LLI_Addr_H = (next_bd_addr >> 38) | ZXDH_GDMA_LINK_END_NODE;

+ if (queue->flag != 0) {

+ bd = IDX_TO_ADDR(queue->ring.desc,

+ queue->ring.last_avail_idx,

+ struct zxdh_gdma_buff_desc*);

+ next_bd_addr = IDX_TO_ADDR(queue->ring.ring_mem,

+ (queue->ring.last_avail_idx + 1) % ZXDH_GDMA_RING_SIZE,

+ uint64_t);

+ bd->LLI_Addr_L  = (next_bd_addr >> 6) & LOW32_MASK;

+ bd->LLI_Addr_H  = next_bd_addr >> 38;

+ rte_wmb();

+ bd->LLI_Addr_H &= ~ZXDH_GDMA_LINK_END_NODE;

+ }

+ /* Record the index of empty bd for dynamic chaining */

+ queue->ring.last_avail_idx = avail_idx;

+ }

+

+ if (++avail_idx >= ZXDH_GDMA_RING_SIZE)

+ avail_idx -= ZXDH_GDMA_RING_SIZE;

+

+ queue->ring.avail_idx = avail_idx;

+}

+

+static int

+zxdh_gdma_rawdev_enqueue_bufs(struct rte_rawdev *dev,

+ __rte_unused struct rte_rawdev_buf **buffers,

+ uint count,

+ rte_rawdev_obj_t context)

+{

+ struct zxdh_gdma_rawdev *gdmadev = NULL;

+ struct zxdh_gdma_queue *queue = NULL;

+ struct zxdh_gdma_enqdeq *e_context = NULL;

+ struct zxdh_gdma_job *job = NULL;

+ uint16_t queue_id = 0;

+ uint val = 0;

+ uint16_t i = 0;

+ uint16_t free_cnt = 0;

+

+ if (dev == NULL)

+ return -EINVAL;

+

+ if (unlikely((count < 1) || (context == NULL)))

+ return -EINVAL;

+

+ gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ if (gdmadev->device_state == ZXDH_GDMA_DEV_STOPPED) {

+ ZXDH_PMD_LOG(ERR, "gdma dev is stopped");

+ return 0;

+ }

+

+ e_context = (struct zxdh_gdma_enqdeq *)context;

+ queue_id = e_context->vq_id;

+ queue = zxdh_gdma_get_queue(dev, queue_id);

+ if ((queue == NULL) || (queue->enable == 0))

+ return -EINVAL;

+

+ free_cnt = queue->sw_ring.free_cnt;

+ if (free_cnt == 0) {

+ ZXDH_PMD_LOG(ERR, "queue %u is full, enq_idx:%u deq_idx:%u used_idx:%u",

+    queue_id, queue->sw_ring.enq_idx,

+    queue->sw_ring.deq_idx, queue->sw_ring.used_idx);

+ return 0;

+ } else if (free_cnt < count) {

+ ZXDH_PMD_LOG(DEBUG, "job num %u > free_cnt, change to %u", count, free_cnt);

+ count = free_cnt;

+ }

+

+ rte_spinlock_lock(&queue->enqueue_lock);

+

+ /* Build bd list, the last bd is empty bd */

+ for (i = 0; i < count; i++) {

+ job = e_context->job[i];

+ zxdh_gdma_fill_bd(queue, job);

+ }

+ zxdh_gdma_fill_bd(queue, NULL);

+

+ if (unlikely(queue->flag == 0)) {

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET,

+ (queue->ring.ring_mem >> 6) & LOW32_MASK);

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_H_OFFSET,

+ queue->ring.ring_mem >> 38);

+ /* Start hardware handling */

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);

+ zxdh_gdma_control_cal(&val, 0);

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);

+ queue->flag = 1;

+ } else {

+ val = ZXDH_GDMA_CHAN_CONTINUE;

+ zxdh_gdma_write_reg(dev, queue->vq_id, ZXDH_GDMA_CHAN_CONTINUE_OFFSET, val);

+ }

+

+ /* job enqueue */

+ for (i = 0; i < count; i++) {

+ queue->sw_ring.job[queue->sw_ring.enq_idx] = e_context->job[i];

+ if (++queue->sw_ring.enq_idx >= queue->queue_size)

+ queue->sw_ring.enq_idx -= queue->queue_size;

+

+ free_cnt--;

+ }

+ queue->sw_ring.free_cnt = free_cnt;

+ queue->sw_ring.pend_cnt += count;

+ rte_spinlock_unlock(&queue->enqueue_lock);

+

+ return count;

+}

+

+static inline void

+zxdh_gdma_used_idx_update(struct zxdh_gdma_queue *queue, uint16_t cnt, uint8_t data_bd_err)

+{

+ uint16_t idx = 0;

+

+ if (queue->sw_ring.used_idx + cnt < queue->queue_size)

+ queue->sw_ring.used_idx += cnt;

+ else

+ queue->sw_ring.used_idx = queue->sw_ring.used_idx + cnt - queue->queue_size;

+

+ if (data_bd_err == 1) {

+ /* Update job status, the last job status is error */

+ if (queue->sw_ring.used_idx == 0)

+ idx = queue->queue_size - 1;

+ else

+ idx = queue->sw_ring.used_idx - 1;

+

+ queue->sw_ring.job[idx]->status = 1;

+ }

+}

+

+static int

+zxdh_gdma_rawdev_dequeue_bufs(struct rte_rawdev *dev,

+ __rte_unused struct rte_rawdev_buf **buffers,

+ uint count,

+ rte_rawdev_obj_t context)

+{

+ struct zxdh_gdma_queue *queue = NULL;

+ struct zxdh_gdma_enqdeq *e_context = NULL;

+ uint16_t queue_id = 0;

+ uint val = 0;

+ uint16_t tc_cnt = 0;

+ uint16_t diff_cnt = 0;

+ uint16_t i = 0;

+ uint16_t bd_idx = 0;

+ uint64_t next_bd_addr = 0;

+ uint8_t data_bd_err = 0;

+

+ if ((dev == NULL) || (context == NULL))

+ return -EINVAL;

+

+ e_context = (struct zxdh_gdma_enqdeq *)context;

+ queue_id = e_context->vq_id;

+ queue = zxdh_gdma_get_queue(dev, queue_id);

+ if ((queue == NULL) || (queue->enable == 0))

+ return -EINVAL;

+

+ if (queue->sw_ring.pend_cnt == 0)

+ goto deq_job;

+

+ /* Get data transmit count */

+ val = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET);

+ tc_cnt = val & LOW16_MASK;

+ if (tc_cnt >= queue->tc_cnt)

+ diff_cnt = tc_cnt - queue->tc_cnt;

+ else

+ diff_cnt = tc_cnt + ZXDH_GDMA_TC_CNT_MAX - queue->tc_cnt;

+

+ queue->tc_cnt = tc_cnt;

+

+ /* Data transmit error, channel stopped */

+ if ((val & ZXDH_GDMA_ERR_STATUS) != 0) {

+ next_bd_addr  = zxdh_gdma_read_reg(dev, queue_id, ZXDH_GDMA_LLI_L_OFFSET);

+ next_bd_addr |= ((uint64_t)zxdh_gdma_read_reg(dev, queue_id,

+ ZXDH_GDMA_LLI_H_OFFSET) << 32);

+ next_bd_addr  = next_bd_addr << 6;

+ bd_idx = (next_bd_addr - queue->ring.ring_mem) / sizeof(struct zxdh_gdma_buff_desc);

+ if ((val & ZXDH_GDMA_SRC_DATA_ERR) || (val & ZXDH_GDMA_DST_ADDR_ERR)) {

+ diff_cnt++;

+ data_bd_err = 1;

+ }

+ ZXDH_PMD_LOG(INFO, "queue%d is err(0x%x) next_bd_idx:%u ll_addr:0x%"PRIx64" def user:0x%x",

+ queue_id, val, bd_idx, next_bd_addr, queue->user);

+ zxdh_gdma_debug_info_dump(dev, queue_id);

+

+ ZXDH_PMD_LOG(INFO, "Clean up error status");

+ val = ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_ERR_INTR_ENABLE;

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET, val);

+

+ ZXDH_PMD_LOG(INFO, "Restart channel");

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_XFERSIZE_OFFSET, 0);

+ zxdh_gdma_control_cal(&val, 0);

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);

+ }

+

+ if (diff_cnt != 0) {

+ zxdh_gdma_used_idx_update(queue, diff_cnt, data_bd_err);

+ queue->sw_ring.deq_cnt += diff_cnt;

+ queue->sw_ring.pend_cnt -= diff_cnt;

+ }

+

+deq_job:

+ if (queue->sw_ring.deq_cnt == 0)

+ return 0;

+ else if (queue->sw_ring.deq_cnt < count)

+ count = queue->sw_ring.deq_cnt;

+

+ queue->sw_ring.deq_cnt -= count;

+

+ for (i = 0; i < count; i++) {

+ e_context->job[i] = queue->sw_ring.job[queue->sw_ring.deq_idx];

+ queue->sw_ring.job[queue->sw_ring.deq_idx] = NULL;

+ if (++queue->sw_ring.deq_idx >= queue->queue_size)

+ queue->sw_ring.deq_idx -= queue->queue_size;

+ }

+ queue->sw_ring.free_cnt += count;

+

+ return count;

+}

+

+static const struct rte_rawdev_ops zxdh_gdma_rawdev_ops = {

+ .dev_info_get = zxdh_gdma_rawdev_info_get,

+ .dev_configure = zxdh_gdma_rawdev_configure,

+ .dev_start = zxdh_gdma_rawdev_start,

+ .dev_stop = zxdh_gdma_rawdev_stop,

+ .dev_close = zxdh_gdma_rawdev_close,

+ .dev_reset = zxdh_gdma_rawdev_reset,

+

+ .queue_setup = zxdh_gdma_rawdev_queue_setup,

+ .queue_release = zxdh_gdma_rawdev_queue_release,

+

+ .attr_get = zxdh_gdma_rawdev_get_attr,

+

+ .enqueue_bufs = zxdh_gdma_rawdev_enqueue_bufs,

+ .dequeue_bufs = zxdh_gdma_rawdev_dequeue_bufs,

+};

+

+static int

+zxdh_gdma_queue_init(struct rte_rawdev *dev, uint16_t queue_id)

+{

+ char name[RTE_RAWDEV_NAME_MAX_LEN];

+ struct zxdh_gdma_queue *queue = NULL;

+ const struct rte_memzone *mz = NULL;

+ uint size = 0;

+ uint val = 0;

+ int ret = 0;

+

+ queue = zxdh_gdma_get_queue(dev, queue_id);

+ if (queue == NULL)

+ return -EINVAL;

+

+ queue->enable = 1;

+ queue->vq_id  = queue_id;

+ queue->flag   = 0;

+ queue->tc_cnt = 0;

+

+ /* Init sw_ring */

+ memset(name, 0, sizeof(name));

+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "gdma_vq%d_sw_ring", queue_id);

+ size = queue->queue_size * sizeof(struct zxdh_gdma_job *);

+ queue->sw_ring.job = rte_zmalloc(name, size, 0);

+ if (queue->sw_ring.job == NULL) {

+ ZXDH_PMD_LOG(ERR, "cannot allocate sw_ring %s", name);

+ ret = -ENOMEM;

+ goto free_queue;

+ }

+

+ /* Cache up to size-1 job in the ring to prevent overwriting hardware prefetching */

+ queue->sw_ring.free_cnt = queue->queue_size - 1;

+ queue->sw_ring.deq_cnt  = 0;

+ queue->sw_ring.pend_cnt = 0;

+ queue->sw_ring.enq_idx  = 0;

+ queue->sw_ring.deq_idx  = 0;

+ queue->sw_ring.used_idx = 0;

+

+ /* Init ring */

+ memset(name, 0, sizeof(name));

+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "gdma_vq%d_ring", queue_id);

+ size = ZXDH_GDMA_RING_SIZE * sizeof(struct zxdh_gdma_buff_desc);

+ mz = rte_memzone_reserve_aligned(name, size, rte_socket_id(),

+ RTE_MEMZONE_IOVA_CONTIG, size);

+ if (mz == NULL) {

+ if (rte_errno == EEXIST)

+ mz = rte_memzone_lookup(name);

+ if (mz == NULL) {

+ ZXDH_PMD_LOG(ERR, "cannot allocate ring %s", name);

+ ret = -ENOMEM;

+ goto free_queue;

+ }

+ }

+ memset(mz->addr, 0, size);

+ queue->ring.ring_mz   = mz;

+ queue->ring.desc      = (struct zxdh_gdma_buff_desc *)(mz->addr);

+ queue->ring.ring_mem  = mz->iova;

+ queue->ring.avail_idx = 0;

+ ZXDH_PMD_LOG(INFO, "queue%u ring phy addr:0x%"PRIx64" virt addr:%p",

+ queue_id, mz->iova, mz->addr);

+

+ /* clean gdma channel */

+ val = ZXDH_GDMA_CHAN_FORCE_CLOSE;

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);

+

+ val = ZXDH_GDMA_ERR_INTR_ENABLE | ZXDH_GDMA_ERR_STATUS | ZXDH_GDMA_TC_CNT_CLEAN;

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_TC_CNT_OFFSET, val);

+

+ val = ZXDH_GDMA_ZF_USER;

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_LLI_USER_OFFSET, val);

+

+ return 0;

+

+free_queue:

+ zxdh_gdma_queue_free(dev, queue_id);

+ return ret;

+}

+

+static int

+zxdh_gdma_queue_free(struct rte_rawdev *dev, uint16_t queue_id)

+{

+ struct zxdh_gdma_rawdev *gdmadev = NULL;

+ struct zxdh_gdma_queue *queue = NULL;

+ uint val = 0;

+

+ queue = zxdh_gdma_get_queue(dev, queue_id);

+ if (queue == NULL)

+ return -EINVAL;

+

+ gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ gdmadev->used_num--;

+

+ /* disable gdma channel */

+ val = ZXDH_GDMA_CHAN_FORCE_CLOSE;

+ zxdh_gdma_write_reg(dev, queue_id, ZXDH_GDMA_CONTROL_OFFSET, val);

+

+ queue->enable           = 0;

+ queue->is_txq           = 0;

+ queue->flag             = 0;

+ queue->user             = 0;

+ queue->tc_cnt           = 0;

+ queue->ring.avail_idx   = 0;

+ queue->sw_ring.free_cnt = 0;

+ queue->sw_ring.deq_cnt  = 0;

+ queue->sw_ring.pend_cnt = 0;

+ queue->sw_ring.enq_idx  = 0;

+ queue->sw_ring.deq_idx  = 0;

+ queue->sw_ring.used_idx = 0;

+

+ if (queue->sw_ring.job != NULL)

+ rte_free(queue->sw_ring.job);

+

+ if (queue->ring.ring_mz != NULL)

+ rte_memzone_free(queue->ring.ring_mz);

+

+ return 0;

+}

+

+static int

+zxdh_gdma_rawdev_probe(struct rte_vdev_device *vdev)

+{

+ struct rte_rawdev *dev = NULL;

+ struct zxdh_gdma_rawdev *gdmadev = NULL;

+ struct zxdh_gdma_queue *queue = NULL;

+ uint8_t i = 0;

+

+ if (zxdh_gdma_pci_scan() != 0) {

+ ZXDH_PMD_LOG(ERR, "Failed to scan gdma pci device!");

+ return -1;

+ }

+

+ if ((gdev.bar_pa[0]) == 0) {

+ ZXDH_PMD_LOG(ERR, "Empty bars 0x%"PRIx64,

+ (uint64_t)gdev.bar_pa[0]);

+ zxdh_gdma_pci_dev_munmap();

+ return -ENODEV;

+ }

+ ZXDH_PMD_LOG(INFO, "%04x:%02x:%02x.%01x Bar0 PhyAddr: 0x%"PRIx64,

+ gdev.domain, gdev.bus, gdev.devid, gdev.function,

+ (uint64_t)gdev.bar_pa[0]);

+

+ dev = rte_rawdev_pmd_allocate(dev_name, sizeof(struct zxdh_gdma_rawdev), rte_socket_id());

+ if (dev == NULL) {

+ ZXDH_PMD_LOG(ERR, "Unable to allocate gdma rawdev");

+ zxdh_gdma_pci_dev_munmap();

+ return -1;

+ }

+ ZXDH_PMD_LOG(INFO, "Init %s on NUMA node %d, dev_id is %d",

+ dev_name, rte_socket_id(), dev->dev_id);

+

+ dev->dev_ops = &zxdh_gdma_rawdev_ops;

+ dev->device = &vdev->device;

+ dev->driver_name = zxdh_gdma_driver_name;

+ gdmadev = zxdh_gdma_rawdev_get_priv(dev);

+ gdmadev->device_state = ZXDH_GDMA_DEV_STOPPED;

+ gdmadev->rawdev = dev;

+ gdmadev->queue_num = ZXDH_GDMA_TOTAL_CHAN_NUM;

+ gdmadev->used_num = 0;

+ gdmadev->base_addr = (uintptr_t)gdev.bar_va[0] + ZXDH_GDMA_BASE_OFFSET;

+

+ for (i = 0; i < ZXDH_GDMA_TOTAL_CHAN_NUM; i++) {

+ queue = &(gdmadev->vqs[i]);

+ queue->enable = 0;

+ queue->queue_size = ZXDH_GDMA_QUEUE_SIZE;

+ rte_spinlock_init(&(queue->enqueue_lock));

+ }

+

+ return 0;

+}

+

+static int

+zxdh_gdma_rawdev_remove(__rte_unused struct rte_vdev_device *vdev)

+{

+ struct rte_rawdev *dev = NULL;

+ int ret = 0;

+

+ dev = rte_rawdev_pmd_get_named_dev(dev_name);

+ if (dev == NULL)

+ return -EINVAL;

+

+ /* rte_rawdev_close is called by pmd_release */

+ ret = rte_rawdev_pmd_release(dev);

+ if (ret != 0) {

+ ZXDH_PMD_LOG(ERR, "Device cleanup failed");

+ return -1;

+ }

+ ZXDH_PMD_LOG(DEBUG, "rawdev %s remove done!", dev_name);

+

+ return ret;

+}

+

+static struct rte_vdev_driver zxdh_gdma_pmd_drv = {

+ .probe = zxdh_gdma_rawdev_probe,

+ .remove = zxdh_gdma_rawdev_remove

+};

+

+RTE_PMD_REGISTER_VDEV(rawdev_zxdh_gdma, zxdh_gdma_pmd_drv);

+RTE_LOG_REGISTER_DEFAULT(zxdh_gdma_pmd_logtype, NOTICE);

diff --git a/drivers/raw/zxdh/zxdh_rawdev.h b/drivers/raw/zxdh/zxdh_rawdev.h

new file mode 100644

index 0000000000..8f26b4f01a

--- /dev/null

+++ b/drivers/raw/zxdh/zxdh_rawdev.h

@@ -0,0 +1,169 @@

+/* SPDX-License-Identifier: BSD-3-Clause

+ * Copyright 2024 ZTE Corporation

+ */

+

+#ifndef __ZXDH_RAWDEV_H__

+#define __ZXDH_RAWDEV_H__

+

+#ifdef __cplusplus

+extern "C" {

+#endif

+

+#include <rte_rawdev.h>

+#include <rte_spinlock.h>

+

+extern int zxdh_gdma_pmd_logtype;

+

+#define ZXDH_PMD_LOG(level, fmt, args...) \

+ rte_log(RTE_LOG_ ## level, zxdh_gdma_pmd_logtype, "%s(): " fmt "\n", __func__, ##args)

+

+#define ZXDH_GDMA_VENDORID                      0x1cf2

+#define ZXDH_GDMA_DEVICEID                      0x8044

+

+#define ZXDH_GDMA_TOTAL_CHAN_NUM                58

+#define ZXDH_GDMA_QUEUE_SIZE                    16384 /* >= 65*64*3 */

+#define ZXDH_GDMA_RING_SIZE                     32768

+

+/* Set if the source address is physical. */

+#define ZXDH_GDMA_JOB_SRC_PHY                   (1UL)

+

+/* States if the destination addresses is physical. */

+#define ZXDH_GDMA_JOB_DEST_PHY                  (1UL << 1)

+

+/* ZF->HOST */

+#define ZXDH_GDMA_JOB_DIR_TX                    (1UL << 2)

+

+/* HOST->ZF */

+#define ZXDH_GDMA_JOB_DIR_RX                    (1UL << 3)

+

+#define ZXDH_GDMA_JOB_DIR_MASK                  (ZXDH_GDMA_JOB_DIR_TX | ZXDH_GDMA_JOB_DIR_RX)

+

+enum zxdh_gdma_device_state {

+ ZXDH_GDMA_DEV_RUNNING,

+ ZXDH_GDMA_DEV_STOPPED

+};

+

+struct zxdh_gdma_buff_desc {

+ uint SrcAddr_L;

+ uint DstAddr_L;

+ uint Xpara;

+ uint ZY_para;

+ uint ZY_SrcStep;

+ uint ZY_DstStep;

+ uint ExtAddr;

+ uint LLI_Addr_L;

+ uint LLI_Addr_H;

+ uint ChCont;

+ uint LLI_User;

+ uint ErrAddr;

+ uint Control;

+ uint SrcAddr_H;

+ uint DstAddr_H;

+ uint Reserved;

+};

+

+struct zxdh_gdma_queue {

+ uint8_t   enable;

+ uint8_t   is_txq;

+ uint16_t  vq_id;

+ uint16_t  queue_size;

+ /* 0:GDMA needs to be configured through the APB interface */

+ uint16_t  flag;

+ uint      user;

+ uint16_t  tc_cnt;

+ rte_spinlock_t enqueue_lock;

+ struct {

+ uint16_t avail_idx;

+ uint16_t last_avail_idx;

+ rte_iova_t ring_mem;

+ const struct rte_memzone *ring_mz;

+ struct zxdh_gdma_buff_desc *desc;

+ } ring;

+ struct {

+ uint16_t  free_cnt;

+ uint16_t  deq_cnt;

+ uint16_t  pend_cnt;

+ uint16_t  enq_idx;

+ uint16_t  deq_idx;

+ uint16_t  used_idx;

+ struct zxdh_gdma_job **job;

+ } sw_ring;

+};

+

+struct zxdh_gdma_rawdev {

+ struct rte_device *device;

+ struct rte_rawdev *rawdev;

+ uintptr_t base_addr;

+ uint8_t queue_num; /* total queue num */

+ uint8_t used_num;  /* used  queue num */

+ enum zxdh_gdma_device_state device_state;

+ struct zxdh_gdma_queue vqs[ZXDH_GDMA_TOTAL_CHAN_NUM];

+};

+

+struct zxdh_gdma_job {

+ uint64_t src;

+ uint64_t dest;

+ uint len;

+ uint flags;

+ uint64_t cnxt;

+ uint16_t status;

+ uint16_t vq_id;

+ void *usr_elem;

+ uint8_t ep_id;

+ uint8_t pf_id;

+ uint16_t vf_id;

+};

+

+struct zxdh_gdma_enqdeq {

+ uint16_t vq_id;

+ struct zxdh_gdma_job **job;

+};

+

+struct zxdh_gdma_config {

+ uint16_t max_hw_queues_per_core;

+ uint16_t max_vqs;

+ int fle_queue_pool_cnt;

+};

+

+struct zxdh_gdma_rbp {

+ uint use_ultrashort:1;

+ uint enable:1;

+ uint dportid:3;

+ uint dpfid:3;

+ uint dvfid:8; /* using route by port for destination */

+ uint drbp:1;

+ uint sportid:3;

+ uint spfid:3;

+ uint svfid:8;

+ uint srbp:1;

+};
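
+

+/* Example (illustrative): .drbp = 1 with .dportid = 0, .dpfid = 0 and

+ * .dvfid = 0 sets the queue up as a txq towards host port 0, pf 0.

+ */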

+

+struct zxdh_gdma_queue_config {

+ uint lcore_id;

+ uint flags;

+ struct zxdh_gdma_rbp *rbp;

+};

+

+struct zxdh_gdma_attr {

+ uint16_t num_hw_queues;

+};

+

+static inline struct zxdh_gdma_rawdev *

+zxdh_gdma_rawdev_get_priv(const struct rte_rawdev *rawdev)

+{

+ return rawdev->dev_private;

+}

+

+uint zxdh_gdma_read_reg(struct rte_rawdev *dev, uint16_t qidx, uint offset);

+void zxdh_gdma_write_reg(struct rte_rawdev *dev, uint16_t qidx, uint offset, uint val);

+int zxdh_gdma_debug_info_dump(struct rte_rawdev *dev, uint16_t queue_id);

+

+#ifdef __cplusplus

+}

+#endif

+

+#endif /* __ZXDH_RAWDEV_H__ */

-- 

2.43.0