Add device PCI init implementation to obtain the PCI capabilities,
read the device configuration and negotiate features.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
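A rough sketch of the probe-time flow added by this patch (error
handling omitted):

    zxdh_eth_dev_init()
      -> zxdh_init_device()
           -> zxdh_read_pci_caps()       /* locate common/notify/dev/isr regions */
           -> zxdh_pci_reset()           /* wait until device_status reads RESET */
           -> zxdh_get_pci_dev_config()  /* negotiate features, read MAC and
                                            max_virtqueue_pairs */
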
drivers/net/zxdh/meson.build | 1 +
drivers/net/zxdh/zxdh_ethdev.c | 43 +++++
drivers/net/zxdh/zxdh_ethdev.h | 18 ++-
drivers/net/zxdh/zxdh_pci.c | 278 +++++++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_pci.h | 138 ++++++++++++++++
drivers/net/zxdh/zxdh_queue.h | 109 +++++++++++++
drivers/net/zxdh/zxdh_rxtx.h | 55 +++++++
7 files changed, 641 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/zxdh/zxdh_pci.c
create mode 100644 drivers/net/zxdh/zxdh_pci.h
create mode 100644 drivers/net/zxdh/zxdh_queue.h
create mode 100644 drivers/net/zxdh/zxdh_rxtx.h
diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index 932fb1c835..7db4e7bc71 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -15,4 +15,5 @@ endif
sources = files(
'zxdh_ethdev.c',
+ 'zxdh_pci.c',
)
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index c911284423..5c747882a7 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -8,6 +8,40 @@
#include "zxdh_ethdev.h"
#include "zxdh_logs.h"
+#include "zxdh_pci.h"
+
+struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
+
+static int32_t zxdh_init_device(struct rte_eth_dev *eth_dev)
+{
+ struct zxdh_hw *hw = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ int ret = 0;
+
+ ret = zxdh_read_pci_caps(pci_dev, hw);
+ if (ret) {
+		PMD_INIT_LOG(ERR, "port 0x%x pci caps read failed", hw->port_id);
+ goto err;
+ }
+
+ zxdh_hw_internal[hw->port_id].zxdh_vtpci_ops = &zxdh_dev_pci_ops;
+ zxdh_pci_reset(hw);
+ zxdh_get_pci_dev_config(hw);
+
+	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+	/* Enable LSC only when the device reports STATUS and MSI-X is available */
+ if (vtpci_with_feature(hw, ZXDH_NET_F_STATUS) && hw->use_msix != ZXDH_MSIX_NONE)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ else
+ eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+
+ return 0;
+
+err:
+ PMD_INIT_LOG(ERR, "port %d init device failed", eth_dev->data->port_id);
+ return ret;
+}
static int zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
{
@@ -45,6 +79,15 @@ static int zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
hw->is_pf = 1;
}
+ ret = zxdh_init_device(eth_dev);
+ if (ret < 0)
+ goto err_zxdh_init;
+
+ return ret;
+
+err_zxdh_init:
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
return ret;
}
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index a11e3624a9..a22ac15065 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -5,6 +5,7 @@
#ifndef ZXDH_ETHDEV_H
#define ZXDH_ETHDEV_H
+#include <rte_ether.h>
#include "ethdev_driver.h"
#ifdef __cplusplus
@@ -24,15 +25,30 @@ extern "C" {
#define ZXDH_MAX_MAC_ADDRS (ZXDH_MAX_UC_MAC_ADDRS + ZXDH_MAX_MC_MAC_ADDRS)
#define ZXDH_NUM_BARS 2
+#define ZXDH_RX_QUEUES_MAX 128U
+#define ZXDH_TX_QUEUES_MAX 128U
struct zxdh_hw {
struct rte_eth_dev *eth_dev;
- uint64_t bar_addr[ZXDH_NUM_BARS];
+ struct zxdh_pci_common_cfg *common_cfg;
+ struct zxdh_net_config *dev_cfg;
+ uint64_t bar_addr[ZXDH_NUM_BARS];
+ uint64_t host_features;
+ uint64_t guest_features;
+ uint32_t max_queue_pairs;
uint32_t speed;
+ uint32_t notify_off_multiplier;
+ uint16_t *notify_base;
+ uint16_t pcie_id;
uint16_t device_id;
uint16_t port_id;
+ uint8_t *isr;
+ uint8_t weak_barriers;
+ uint8_t use_msix;
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+
uint8_t duplex;
uint8_t is_pf;
};
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
new file mode 100644
index 0000000000..a88d620f30
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <unistd.h>
+
+#include <rte_io.h>
+#include <rte_bus.h>
+#include <rte_pci.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_pci.h"
+#include "zxdh_logs.h"
+#include "zxdh_queue.h"
+
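+/* Feature bits the PMD is willing to negotiate; intersected with the
+ * device's offer in zxdh_get_pci_dev_config().
+ */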
+#define ZXDH_PMD_DEFAULT_GUEST_FEATURES \
+ (1ULL << ZXDH_NET_F_MRG_RXBUF | \
+ 1ULL << ZXDH_NET_F_STATUS | \
+ 1ULL << ZXDH_NET_F_MQ | \
+ 1ULL << ZXDH_F_ANY_LAYOUT | \
+ 1ULL << ZXDH_F_VERSION_1 | \
+ 1ULL << ZXDH_F_RING_PACKED | \
+ 1ULL << ZXDH_F_IN_ORDER | \
+ 1ULL << ZXDH_F_NOTIFICATION_DATA | \
+ 1ULL << ZXDH_NET_F_MAC)
+
+static void zxdh_read_dev_config(struct zxdh_hw *hw,
+ size_t offset,
+ void *dst,
+ int32_t length)
+{
+ int32_t i = 0;
+ uint8_t *p = NULL;
+ uint8_t old_gen = 0;
+ uint8_t new_gen = 0;
+
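+	/* Re-read if the device bumps config_generation mid-copy, so the
+	 * snapshot written to dst stays consistent.
+	 */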
+ do {
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+static void zxdh_write_dev_config(struct zxdh_hw *hw,
+ size_t offset,
+ const void *src,
+ int32_t length)
+{
+ int32_t i = 0;
+ const uint8_t *p = src;
+
+ for (i = 0; i < length; i++)
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+static uint8_t zxdh_get_status(struct zxdh_hw *hw)
+{
+ return rte_read8(&hw->common_cfg->device_status);
+}
+
+static void zxdh_set_status(struct zxdh_hw *hw, uint8_t status)
+{
+ rte_write8(status, &hw->common_cfg->device_status);
+}
+
+static uint64_t zxdh_get_features(struct zxdh_hw *hw)
+{
+ uint32_t features_lo = 0;
+ uint32_t features_hi = 0;
+
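+	/* The 64-bit feature word is exposed as two 32-bit halves,
+	 * selected via device_feature_select.
+	 */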
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+ return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+static void zxdh_set_features(struct zxdh_hw *hw, uint64_t features)
+{
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1), &hw->common_cfg->guest_feature);
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32, &hw->common_cfg->guest_feature);
+}
+
+const struct zxdh_pci_ops zxdh_dev_pci_ops = {
+ .read_dev_cfg = zxdh_read_dev_config,
+ .write_dev_cfg = zxdh_write_dev_config,
+ .get_status = zxdh_get_status,
+ .set_status = zxdh_set_status,
+ .get_features = zxdh_get_features,
+ .set_features = zxdh_set_features,
+};
+
+uint64_t zxdh_pci_get_features(struct zxdh_hw *hw)
+{
+ return ZXDH_VTPCI_OPS(hw)->get_features(hw);
+}
+
+void zxdh_pci_reset(struct zxdh_hw *hw)
+{
+	uint32_t retry = 0;
+
+	PMD_INIT_LOG(INFO, "port %u device reset started, waiting...", hw->port_id);
+	ZXDH_VTPCI_OPS(hw)->set_status(hw, ZXDH_CONFIG_STATUS_RESET);
+	/* Flush the status write and poll until the device reports reset done. */
+ while (ZXDH_VTPCI_OPS(hw)->get_status(hw) != ZXDH_CONFIG_STATUS_RESET) {
+ ++retry;
+ rte_delay_ms(1);
+ }
+	PMD_INIT_LOG(INFO, "port %u device reset done after %u ms", hw->port_id, retry);
+}
+
+static void *get_cfg_addr(struct rte_pci_device *dev, struct zxdh_pci_cap *cap)
+{
+ uint8_t bar = cap->bar;
+ uint32_t length = cap->length;
+ uint32_t offset = cap->offset;
+
+ if (bar >= PCI_MAX_RESOURCE) {
+ PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
+ return NULL;
+ }
+ if (offset + length < offset) {
+ PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows", offset, length);
+ return NULL;
+ }
+ if (offset + length > dev->mem_resource[bar].len) {
+ PMD_INIT_LOG(ERR, "invalid cap: overflows bar space");
+ return NULL;
+ }
+ uint8_t *base = dev->mem_resource[bar].addr;
+
+ if (base == NULL) {
+ PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
+ return NULL;
+ }
+ return base + offset;
+}
+
+int32_t zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw)
+{
+ struct zxdh_pci_cap cap;
+ uint8_t pos = 0;
+ int32_t ret = 0;
+
+ if (dev->mem_resource[0].addr == NULL) {
+ PMD_INIT_LOG(ERR, "bar0 base addr is NULL");
+ return -1;
+ }
+
+ hw->use_msix = zxdh_pci_msix_detect(dev);
+
+ pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_VNDR);
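+	/* Walk the vendor-specific capability list and record where each
+	 * configuration region lives in BAR space.
+	 */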
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret != sizeof(cap)) {
+ PMD_INIT_LOG(ERR, "failed to read pci cap at pos: %x ret %d", pos, ret);
+ break;
+ }
+ if (cap.cap_vndr != RTE_PCI_CAP_ID_VNDR) {
+ PMD_INIT_LOG(DEBUG, "[%2x] skipping non VNDR cap id: %02x",
+ pos, cap.cap_vndr);
+ goto next;
+ }
+ PMD_INIT_LOG(DEBUG, "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
+ pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case ZXDH_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case ZXDH_PCI_CAP_NOTIFY_CFG: {
+ ret = rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
+ if (ret != 4)
+ PMD_INIT_LOG(ERR,
+ "failed to read notify_off_multiplier, ret %d", ret);
+ else
+ hw->notify_base = get_cfg_addr(dev, &cap);
+ break;
+ }
+ case ZXDH_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case ZXDH_PCI_CAP_ISR_CFG:
+ hw->isr = get_cfg_addr(dev, &cap);
+ break;
+ case ZXDH_PCI_CAP_PCI_CFG: {
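+			/* pcie_id layout, as decoded below: bits [15:12] EP,
+			 * bit 11 PF/VF flag, bits [10:8] PF index, bits [7:0] VF index.
+			 */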
+ hw->pcie_id = *(uint16_t *)&cap.padding[1];
+ PMD_INIT_LOG(DEBUG, "get pcie id 0x%x", hw->pcie_id);
+
+ if ((hw->pcie_id >> 11) & 0x1) /* PF */ {
+ PMD_INIT_LOG(DEBUG, "EP %u PF %u",
+ hw->pcie_id >> 12, (hw->pcie_id >> 8) & 0x7);
+ } else { /* VF */
+ PMD_INIT_LOG(DEBUG, "EP %u PF %u VF %u",
+ hw->pcie_id >> 12,
+ (hw->pcie_id >> 8) & 0x7,
+ hw->pcie_id & 0xff);
+ }
+ break;
+ }
+ }
+next:
+ pos = cap.cap_next;
+ }
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->dev_cfg == NULL || hw->isr == NULL) {
+		PMD_INIT_LOG(ERR, "zxdh pci capabilities are incomplete (common/notify/dev/isr).");
+ return -1;
+ }
+ return 0;
+}
+
+void zxdh_pci_read_dev_config(struct zxdh_hw *hw, size_t offset, void *dst, int32_t length)
+{
+ ZXDH_VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+}
+
+int32_t zxdh_get_pci_dev_config(struct zxdh_hw *hw)
+{
+ uint64_t guest_features = 0;
+ uint64_t nego_features = 0;
+ uint32_t max_queue_pairs = 0;
+
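+	/* Feature negotiation: start from the PMD default set and keep only
+	 * the bits the device also offers.
+	 */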
+ hw->host_features = zxdh_pci_get_features(hw);
+
+ guest_features = (uint64_t)ZXDH_PMD_DEFAULT_GUEST_FEATURES;
+ nego_features = guest_features & hw->host_features;
+
+ hw->guest_features = nego_features;
+
+ if (hw->guest_features & (1ULL << ZXDH_NET_F_MAC)) {
+ zxdh_pci_read_dev_config(hw, offsetof(struct zxdh_net_config, mac),
+ &hw->mac_addr, RTE_ETHER_ADDR_LEN);
+ } else {
+ rte_eth_random_addr(&hw->mac_addr[0]);
+ }
+
+ zxdh_pci_read_dev_config(hw, offsetof(struct zxdh_net_config, max_virtqueue_pairs),
+ &max_queue_pairs, sizeof(max_queue_pairs));
+
+ if (max_queue_pairs == 0)
+ hw->max_queue_pairs = ZXDH_RX_QUEUES_MAX;
+ else
+ hw->max_queue_pairs = RTE_MIN(ZXDH_RX_QUEUES_MAX, max_queue_pairs);
+ PMD_INIT_LOG(DEBUG, "set max queue pairs %d", hw->max_queue_pairs);
+
+ return 0;
+}
+
+enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev)
+{
+ uint16_t flags = 0;
+ uint8_t pos = 0;
+ int16_t ret = 0;
+
+ pos = rte_pci_find_capability(dev, RTE_PCI_CAP_ID_MSIX);
+
+ if (pos > 0) {
+ ret = rte_pci_read_config(dev, &flags, 2, pos + RTE_PCI_MSIX_FLAGS);
+ if (ret == 2 && flags & RTE_PCI_MSIX_FLAGS_ENABLE)
+ return ZXDH_MSIX_ENABLED;
+ else
+ return ZXDH_MSIX_DISABLED;
+ }
+ return ZXDH_MSIX_NONE;
+}
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
new file mode 100644
index 0000000000..ff656f28e6
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_PCI_H
+#define ZXDH_PCI_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <bus_pci_driver.h>
+
+#include "zxdh_ethdev.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum zxdh_msix_status {
+ ZXDH_MSIX_NONE = 0,
+ ZXDH_MSIX_DISABLED = 1,
+ ZXDH_MSIX_ENABLED = 2
+};
+
+#define ZXDH_NET_F_MAC 5 /* Host has given MAC address. */
+#define ZXDH_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
+#define ZXDH_NET_F_STATUS 16 /* zxdh_net_config.status available */
+#define ZXDH_NET_F_MQ 22 /* Device supports Receive Flow Steering */
+#define ZXDH_F_ANY_LAYOUT 27 /* Can the device handle any descriptor layout */
+#define ZXDH_F_VERSION_1 32
+#define ZXDH_F_RING_PACKED 34
+#define ZXDH_F_IN_ORDER 35
+#define ZXDH_F_NOTIFICATION_DATA 38
+
+#define ZXDH_PCI_CAP_COMMON_CFG 1 /* Common configuration */
+#define ZXDH_PCI_CAP_NOTIFY_CFG 2 /* Notifications */
+#define ZXDH_PCI_CAP_ISR_CFG 3 /* ISR Status */
+#define ZXDH_PCI_CAP_DEVICE_CFG 4 /* Device specific configuration */
+#define ZXDH_PCI_CAP_PCI_CFG 5 /* PCI configuration access */
+
+/* Status byte for guest to report progress. */
+#define ZXDH_CONFIG_STATUS_RESET 0x00
+#define ZXDH_CONFIG_STATUS_ACK 0x01
+#define ZXDH_CONFIG_STATUS_DRIVER 0x02
+#define ZXDH_CONFIG_STATUS_DRIVER_OK 0x04
+#define ZXDH_CONFIG_STATUS_FEATURES_OK 0x08
+#define ZXDH_CONFIG_STATUS_DEV_NEED_RESET 0x40
+#define ZXDH_CONFIG_STATUS_FAILED 0x80
+
+struct zxdh_net_config {
+ /* The config defining mac address (if ZXDH_NET_F_MAC) */
+ uint8_t mac[RTE_ETHER_ADDR_LEN];
+ /* See ZXDH_NET_F_STATUS and ZXDH_NET_S_* above */
+ uint16_t status;
+ uint16_t max_virtqueue_pairs;
+ uint16_t mtu;
+ uint32_t speed;
+ uint8_t duplex;
+} __rte_packed;
+
+/* This is the PCI capability header: */
+struct zxdh_pci_cap {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint8_t bar; /* Where to find it. */
+ uint8_t padding[3]; /* Pad to full dword. */
+ uint32_t offset; /* Offset within bar. */
+ uint32_t length; /* Length of the structure, in bytes. */
+};
+
+/* Fields in ZXDH_PCI_CAP_COMMON_CFG: */
+struct zxdh_pci_common_cfg {
+ /* About the whole device. */
+ uint32_t device_feature_select; /* read-write */
+ uint32_t device_feature; /* read-only */
+ uint32_t guest_feature_select; /* read-write */
+ uint32_t guest_feature; /* read-write */
+ uint16_t msix_config; /* read-write */
+ uint16_t num_queues; /* read-only */
+ uint8_t device_status; /* read-write */
+ uint8_t config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ uint16_t queue_select; /* read-write */
+ uint16_t queue_size; /* read-write, power of 2. */
+ uint16_t queue_msix_vector; /* read-write */
+ uint16_t queue_enable; /* read-write */
+ uint16_t queue_notify_off; /* read-only */
+ uint32_t queue_desc_lo; /* read-write */
+ uint32_t queue_desc_hi; /* read-write */
+ uint32_t queue_avail_lo; /* read-write */
+ uint32_t queue_avail_hi; /* read-write */
+ uint32_t queue_used_lo; /* read-write */
+ uint32_t queue_used_hi; /* read-write */
+};
+
+static inline int32_t vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
+
+struct zxdh_pci_ops {
+ void (*read_dev_cfg)(struct zxdh_hw *hw, size_t offset, void *dst, int32_t len);
+ void (*write_dev_cfg)(struct zxdh_hw *hw, size_t offset, const void *src, int32_t len);
+
+ uint8_t (*get_status)(struct zxdh_hw *hw);
+ void (*set_status)(struct zxdh_hw *hw, uint8_t status);
+
+ uint64_t (*get_features)(struct zxdh_hw *hw);
+ void (*set_features)(struct zxdh_hw *hw, uint64_t features);
+};
+
+struct zxdh_hw_internal {
+ const struct zxdh_pci_ops *zxdh_vtpci_ops;
+};
+
+#define ZXDH_VTPCI_OPS(hw) (zxdh_hw_internal[(hw)->port_id].zxdh_vtpci_ops)
+
+extern struct zxdh_hw_internal zxdh_hw_internal[RTE_MAX_ETHPORTS];
+extern const struct zxdh_pci_ops zxdh_dev_pci_ops;
+
+void zxdh_pci_reset(struct zxdh_hw *hw);
+void zxdh_pci_read_dev_config(struct zxdh_hw *hw, size_t offset,
+ void *dst, int32_t length);
+
+int32_t zxdh_read_pci_caps(struct rte_pci_device *dev, struct zxdh_hw *hw);
+int32_t zxdh_get_pci_dev_config(struct zxdh_hw *hw);
+
+uint64_t zxdh_pci_get_features(struct zxdh_hw *hw);
+enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZXDH_PCI_H */
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
new file mode 100644
index 0000000000..0b6f48adf9
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_QUEUE_H
+#define ZXDH_QUEUE_H
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+#include "zxdh_ethdev.h"
+#include "zxdh_rxtx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
+struct zxdh_vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+} __rte_packed;
+
+struct zxdh_vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[];
+} __rte_packed;
+
+struct zxdh_vring_packed_desc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t id;
+ uint16_t flags;
+} __rte_packed;
+
+struct zxdh_vring_packed_desc_event {
+ uint16_t desc_event_off_wrap;
+ uint16_t desc_event_flags;
+} __rte_packed;
+
+struct zxdh_vring_packed {
+ uint32_t num;
+ struct zxdh_vring_packed_desc *desc;
+ struct zxdh_vring_packed_desc_event *driver;
+ struct zxdh_vring_packed_desc_event *device;
+} __rte_packed;
+
+struct zxdh_vq_desc_extra {
+ void *cookie;
+ uint16_t ndescs;
+ uint16_t next;
+} __rte_packed;
+
+struct zxdh_virtqueue {
+ struct zxdh_hw *hw; /**< zxdh_hw structure pointer. */
+ struct {
+		/** vring keeping descs and events */
+ struct zxdh_vring_packed ring;
+ uint8_t used_wrap_counter;
+ uint8_t rsv;
+ uint16_t cached_flags; /**< cached flags for descs */
+ uint16_t event_flags_shadow;
+ uint16_t rsv1;
+ } __rte_packed vq_packed;
+ uint16_t vq_used_cons_idx; /**< last consumed descriptor */
+ uint16_t vq_nentries; /**< vring desc numbers */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_avail_idx; /**< sync until needed */
+ uint16_t vq_free_thresh; /**< free threshold */
+ uint16_t rsv2;
+
+ void *vq_ring_virt_mem; /**< linear address of vring*/
+ uint32_t vq_ring_size;
+
+ union {
+ struct zxdh_virtnet_rx rxq;
+ struct zxdh_virtnet_tx txq;
+ };
+
+	/** Physical address of the vring,
+	 * or its virtual address.
+	 */
+ rte_iova_t vq_ring_mem;
+
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+	 */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ uint16_t vq_queue_index; /**< PCI queue index */
+ uint16_t offset; /**< relative offset to obtain addr in mbuf */
+ uint16_t *notify_addr;
+ struct rte_mbuf **sw_ring; /**< RX software ring. */
+ struct zxdh_vq_desc_extra vq_descx[];
+} __rte_packed;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZXDH_QUEUE_H */
diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h
new file mode 100644
index 0000000000..7d4b5481ec
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_rxtx.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#ifndef ZXDH_RXTX_H
+#define ZXDH_RXTX_H
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_mbuf_core.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct zxdh_virtnet_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+ uint64_t multicast;
+ uint64_t broadcast;
+ uint64_t truncated_err;
+ uint64_t size_bins[8];
+};
+
+struct zxdh_virtnet_rx {
+ struct zxdh_virtqueue *vq;
+
+ /* dummy mbuf, for wraparound when processing RX ring. */
+ struct rte_mbuf fake_mbuf;
+
+ uint64_t mbuf_initializer; /* value to init mbufs. */
+ struct rte_mempool *mpool; /* mempool for mbuf allocation */
+ uint16_t queue_id; /* DPDK queue index. */
+ uint16_t port_id; /* Device port identifier. */
+ struct zxdh_virtnet_stats stats;
+ const struct rte_memzone *mz; /* mem zone to populate RX ring. */
+} __rte_packed;
+
+struct zxdh_virtnet_tx {
+ struct zxdh_virtqueue *vq;
+ const struct rte_memzone *zxdh_net_hdr_mz; /* memzone to populate hdr. */
+ rte_iova_t zxdh_net_hdr_mem; /* hdr for each xmit packet */
+ uint16_t queue_id; /* DPDK queue index. */
+ uint16_t port_id; /* Device port identifier. */
+ struct zxdh_virtnet_stats stats;
+ const struct rte_memzone *mz; /* mem zone to populate TX ring. */
+} __rte_packed;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ZXDH_RXTX_H */
--
2.27.0