From: Junlong Wang <wang.junlong1@zte.com.cn>
To: ferruh.yigit@amd.com
Cc: dev@dpdk.org, wang.yong19@zte.com.cn,
Junlong Wang <wang.junlong1@zte.com.cn>
Subject: [PATCH v10 09/10] net/zxdh: add zxdh dev configure ops
Date: Mon, 4 Nov 2024 19:58:52 +0800 [thread overview]
Message-ID: <20241104115856.2795213-10-wang.junlong1@zte.com.cn> (raw)
In-Reply-To: <20241104115856.2795213-1-wang.junlong1@zte.com.cn>
Provide zxdh dev configure ops for queue
check, reset, resource allocation, etc.
Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
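Note for reviewers (not for the commit message): a minimal sketch of how
these ops are exercised from an application, assuming port 0 is a zxdh
device and EAL is already initialized; the queue counts and mq modes are
chosen to satisfy the checks in zxdh_dev_configure below:

	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_NONE },
		.txmode = { .mq_mode = RTE_ETH_MQ_TX_NONE },
	};
	/* zxdh requires nb_rx_queues == nb_tx_queues */
	if (rte_eth_dev_configure(0, 4, 4, &conf) < 0)
		rte_exit(EXIT_FAILURE, "dev_configure failed");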
drivers/net/zxdh/meson.build | 1 +
drivers/net/zxdh/zxdh_common.c | 144 +++++++++++
drivers/net/zxdh/zxdh_common.h | 11 +
drivers/net/zxdh/zxdh_ethdev.c | 459 +++++++++++++++++++++++++++++++++
drivers/net/zxdh/zxdh_ethdev.h | 16 ++
drivers/net/zxdh/zxdh_pci.c | 106 ++++++++
drivers/net/zxdh/zxdh_pci.h | 30 ++-
drivers/net/zxdh/zxdh_queue.c | 127 +++++++++
drivers/net/zxdh/zxdh_queue.h | 175 +++++++++++++
9 files changed, 1068 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/zxdh/zxdh_queue.c
diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build
index a16db47f89..b96aa5a27e 100644
--- a/drivers/net/zxdh/meson.build
+++ b/drivers/net/zxdh/meson.build
@@ -18,4 +18,5 @@ sources = files(
'zxdh_pci.c',
'zxdh_msg.c',
'zxdh_common.c',
+ 'zxdh_queue.c',
)
diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
index 0d7ea4535d..4f18c97ed7 100644
--- a/drivers/net/zxdh/zxdh_common.c
+++ b/drivers/net/zxdh/zxdh_common.c
@@ -20,6 +20,7 @@
#define ZXDH_COMMON_TABLE_WRITE 1
#define ZXDH_COMMON_FIELD_PHYPORT 6
+#define ZXDH_COMMON_FIELD_DATACH 3
#define ZXDH_RSC_TBL_CONTENT_LEN_MAX (257 * 2)
@@ -254,3 +255,146 @@ zxdh_panelid_get(struct rte_eth_dev *dev, uint8_t *panelid)
int32_t ret = zxdh_get_res_panel_id(&param, panelid);
return ret;
}
+
+uint32_t
+zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
+ uint32_t val = *((volatile uint32_t *)(baseaddr + reg));
+ return val;
+}
+
+void
+zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]);
+ *((volatile uint32_t *)(baseaddr + reg)) = val;
+}
+
+static bool
+zxdh_try_lock(struct zxdh_hw *hw)
+{
+ uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG);
+
+ /* check whether lock is used */
+ if (!(var & ZXDH_VF_LOCK_ENABLE_MASK))
+ return false;
+
+ return true;
+}
+
+int32_t
+zxdh_timedlock(struct zxdh_hw *hw, uint32_t us)
+{
+ uint16_t timeout = 0;
+
+ while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
+ rte_delay_us_block(us);
+ /* acquire hw lock */
+ if (!zxdh_try_lock(hw)) {
+ PMD_DRV_LOG(ERR, "Acquiring hw lock got failed, timeout: %d", timeout);
+ continue;
+ }
+ break;
+ }
+ if (timeout >= ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
+ PMD_DRV_LOG(ERR, "Failed to acquire channel");
+ return -1;
+ }
+ return 0;
+}
+
+void
+zxdh_release_lock(struct zxdh_hw *hw)
+{
+ uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG);
+
+ if (var & ZXDH_VF_LOCK_ENABLE_MASK) {
+ var &= ~ZXDH_VF_LOCK_ENABLE_MASK;
+ zxdh_write_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG, var);
+ }
+}
+
+uint32_t
+zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg)
+{
+ uint32_t val = *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg));
+ return val;
+}
+
+void
+zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val)
+{
+ *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)) = val;
+}
+
+static int32_t
+zxdh_common_table_write(struct zxdh_hw *hw, uint8_t field,
+ void *buff, uint16_t buff_size)
+{
+ struct zxdh_pci_bar_msg desc;
+ struct zxdh_msg_recviver_mem msg_rsp;
+ int32_t ret = 0;
+
+ if (!hw->msg_chan_init) {
+ PMD_DRV_LOG(ERR, "Bar messages channel not initialized");
+ return -1;
+ }
+ if (buff_size != 0 && buff == NULL) {
+ PMD_DRV_LOG(ERR, "Buff is invalid");
+ return -1;
+ }
+
+ ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_WRITE,
+ field, buff, buff_size);
+
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to fill common msg");
+ return ret;
+ }
+
+ ret = zxdh_send_command(hw, &desc, ZXDH_BAR_MODULE_TBL, &msg_rsp);
+ if (ret != 0)
+ goto free_msg_data;
+
+ ret = zxdh_common_rsp_check(&msg_rsp, NULL, 0);
+ if (ret != 0)
+ goto free_rsp_data;
+
+free_rsp_data:
+ rte_free(msg_rsp.recv_buffer);
+free_msg_data:
+ rte_free(desc.payload_addr);
+ return ret;
+}
+
+int32_t
+zxdh_datach_set(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t buff_size = (hw->queue_num + 1) * 2;
+ int32_t ret = 0;
+ uint16_t i;
+
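+ /* Message payload: queue count (u16) followed by one physical channel number (u16) per logic queue */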
+ void *buff = rte_zmalloc(NULL, buff_size, 0);
+ if (unlikely(buff == NULL)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate buff");
+ return -ENOMEM;
+ }
+ memset(buff, 0, buff_size);
+ uint16_t *pdata = (uint16_t *)buff;
+ *pdata++ = hw->queue_num;
+
+ for (i = 0; i < hw->queue_num; i++)
+ *(pdata + i) = hw->channel_context[i].ph_chno;
+
+ ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH,
+ (void *)buff, buff_size);
+ if (ret != 0)
+ PMD_DRV_LOG(ERR, "Failed to setup data channel of common table");
+
+ rte_free(buff);
+ return ret;
+}
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
index ba29ca1dad..4a06da9495 100644
--- a/drivers/net/zxdh/zxdh_common.h
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -14,6 +14,10 @@
extern "C" {
#endif
+#define ZXDH_VF_LOCK_REG 0x90
+#define ZXDH_VF_LOCK_ENABLE_MASK 0x1
+#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX 10
+
struct zxdh_res_para {
uint64_t virt_addr;
uint16_t pcie_id;
@@ -22,6 +26,13 @@ struct zxdh_res_para {
int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport);
int32_t zxdh_panelid_get(struct rte_eth_dev *dev, uint8_t *pannelid);
+uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg);
+void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val);
+void zxdh_release_lock(struct zxdh_hw *hw);
+int32_t zxdh_timedlock(struct zxdh_hw *hw, uint32_t us);
+uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg);
+void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val);
+int32_t zxdh_datach_set(struct rte_eth_dev *dev);
#ifdef __cplusplus
}
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 11ec5dc34f..54e51a31fa 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -374,8 +374,467 @@ zxdh_configure_intr(struct rte_eth_dev *dev)
return ret;
}
+static int32_t
+zxdh_features_update(struct zxdh_hw *hw,
+ const struct rte_eth_rxmode *rxmode,
+ const struct rte_eth_txmode *txmode)
+{
+ uint64_t rx_offloads = rxmode->offloads;
+ uint64_t tx_offloads = txmode->offloads;
+ uint64_t req_features = hw->guest_features;
+
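+ /* map requested rx/tx offloads onto feature bits, then mask with host_features to negotiate */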
+ if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
+ req_features |= (1ULL << ZXDH_NET_F_GUEST_CSUM);
+
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+ req_features |= (1ULL << ZXDH_NET_F_GUEST_TSO4) |
+ (1ULL << ZXDH_NET_F_GUEST_TSO6);
+
+ if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
+ req_features |= (1ULL << ZXDH_NET_F_CSUM);
+
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
+ req_features |= (1ULL << ZXDH_NET_F_HOST_TSO4) |
+ (1ULL << ZXDH_NET_F_HOST_TSO6);
+
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO)
+ req_features |= (1ULL << ZXDH_NET_F_HOST_UFO);
+
+ req_features = req_features & hw->host_features;
+ hw->guest_features = req_features;
+
+ ZXDH_VTPCI_OPS(hw)->set_features(hw, req_features);
+
+ if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
+ !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
+ PMD_DRV_LOG(ERR, "rx checksum not available on this host");
+ return -ENOTSUP;
+ }
+
+ if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
+ (!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+ !vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
+ PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static bool
+rx_offload_enabled(struct zxdh_hw *hw)
+{
+ return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
+ vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+ vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
+}
+
+static bool
+tx_offload_enabled(struct zxdh_hw *hw)
+{
+ return vtpci_with_feature(hw, ZXDH_NET_F_CSUM) ||
+ vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||
+ vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||
+ vtpci_with_feature(hw, ZXDH_NET_F_HOST_UFO);
+}
+
+static void
+zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t nr_vq = hw->queue_num;
+ uint32_t i = 0;
+
+ const char *type = NULL;
+ struct zxdh_virtqueue *vq = NULL;
+ struct rte_mbuf *buf = NULL;
+ int32_t queue_type = 0;
+
+ if (hw->vqs == NULL)
+ return;
+
+ for (i = 0; i < nr_vq; i++) {
+ vq = hw->vqs[i];
+ if (!vq)
+ continue;
+
+ queue_type = zxdh_get_queue_type(i);
+ if (queue_type == ZXDH_VTNET_RQ)
+ type = "rxq";
+ else if (queue_type == ZXDH_VTNET_TQ)
+ type = "txq";
+ else
+ continue;
+ PMD_DRV_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);
+
+ while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL)
+ rte_pktmbuf_free(buf);
+ }
+}
+
+static int32_t
+zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t base = (queue_type == ZXDH_VTNET_RQ) ? 0 : 1;
+ uint16_t i = 0;
+ uint16_t j = 0;
+ uint16_t done = 0;
+ int32_t ret = 0;
+
+ ret = zxdh_timedlock(hw, 1000);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Acquiring hw lock got failed, timeout");
+ return -1;
+ }
+
+ /* Iterate COI table and find free channel */
+ for (i = ZXDH_QUEUES_BASE / 32; i < ZXDH_TOTAL_QUEUES_NUM / 32; i++) {
+ uint32_t addr = ZXDH_QUERES_SHARE_BASE + (i * sizeof(uint32_t));
+ uint32_t var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
+
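+ /* RQ channels occupy even bit positions, TQ channels odd ones, hence step 2 */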
+ for (j = base; j < 32; j += 2) {
+ /* Got the available channel & update COI table */
+ if ((var & (1 << j)) == 0) {
+ var |= (1 << j);
+ zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+ done = 1;
+ break;
+ }
+ }
+ if (done)
+ break;
+ }
+ zxdh_release_lock(hw);
+ /* check for no channel condition */
+ if (done != 1) {
+ PMD_DRV_LOG(ERR, "NO availd queues");
+ return -1;
+ }
+ /* return the available channel ID */
+ return (i * 32) + j;
+}
+
+static int32_t
+zxdh_acquire_channel(struct rte_eth_dev *dev, uint16_t lch)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+
+ if (hw->channel_context[lch].valid == 1) {
+ PMD_DRV_LOG(DEBUG, "Logic channel:%u already acquired Physics channel:%u",
+ lch, hw->channel_context[lch].ph_chno);
+ return hw->channel_context[lch].ph_chno;
+ }
+ int32_t pch = zxdh_get_available_channel(dev, zxdh_get_queue_type(lch));
+
+ if (pch < 0) {
+ PMD_DRV_LOG(ERR, "Failed to acquire channel");
+ return -1;
+ }
+ hw->channel_context[lch].ph_chno = (uint16_t)pch;
+ hw->channel_context[lch].valid = 1;
+ PMD_DRV_LOG(DEBUG, "Acquire channel success lch:%u --> pch:%d", lch, pch);
+ return 0;
+}
+
+static void
+zxdh_init_vring(struct zxdh_virtqueue *vq)
+{
+ int32_t size = vq->vq_nentries;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ memset(ring_mem, 0, vq->vq_ring_size);
+
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+ memset(vq->vq_descx, 0, sizeof(struct zxdh_vq_desc_extra) * vq->vq_nentries);
+ vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size);
+ vring_desc_init_packed(vq, size);
+ virtqueue_disable_intr(vq);
+}
+
+static int32_t
+zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
+{
+ char vq_name[ZXDH_VIRTQUEUE_MAX_NAME_SZ] = {0};
+ char vq_hdr_name[ZXDH_VIRTQUEUE_MAX_NAME_SZ] = {0};
+ const struct rte_memzone *mz = NULL;
+ const struct rte_memzone *hdr_mz = NULL;
+ uint32_t size = 0;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ struct zxdh_virtnet_rx *rxvq = NULL;
+ struct zxdh_virtnet_tx *txvq = NULL;
+ struct zxdh_virtqueue *vq = NULL;
+ size_t sz_hdr_mz = 0;
+ void *sw_ring = NULL;
+ int32_t queue_type = zxdh_get_queue_type(vtpci_logic_qidx);
+ int32_t numa_node = dev->device->numa_node;
+ uint16_t vtpci_phy_qidx = 0;
+ uint32_t vq_size = 0;
+ int32_t ret = 0;
+
+ if (hw->channel_context[vtpci_logic_qidx].valid == 0) {
+ PMD_DRV_LOG(ERR, "lch %d is invalid", vtpci_logic_qidx);
+ return -EINVAL;
+ }
+ vtpci_phy_qidx = hw->channel_context[vtpci_logic_qidx].ph_chno;
+
+ PMD_DRV_LOG(DEBUG, "vtpci_logic_qidx :%d setting up physical queue: %u on NUMA node %d",
+ vtpci_logic_qidx, vtpci_phy_qidx, numa_node);
+
+ vq_size = ZXDH_QUEUE_DEPTH;
+
+ if (ZXDH_VTPCI_OPS(hw)->set_queue_num != NULL)
+ ZXDH_VTPCI_OPS(hw)->set_queue_num(hw, vtpci_phy_qidx, vq_size);
+
+ snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, vtpci_phy_qidx);
+
+ size = RTE_ALIGN_CEIL(sizeof(*vq) + vq_size * sizeof(struct zxdh_vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ if (queue_type == ZXDH_VTNET_TQ) {
+ /*
+ * For each xmit packet, allocate a zxdh_net_hdr
+ * and indirect ring elements
+ */
+ sz_hdr_mz = vq_size * sizeof(struct zxdh_tx_region);
+ }
+
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, numa_node);
+ if (vq == NULL) {
+ PMD_DRV_LOG(ERR, "can not allocate vq");
+ return -ENOMEM;
+ }
+ hw->vqs[vtpci_logic_qidx] = vq;
+
+ vq->hw = hw;
+ vq->vq_queue_index = vtpci_phy_qidx;
+ vq->vq_nentries = vq_size;
+
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = ZXDH_VRING_PACKED_DESC_F_AVAIL;
+ vq->vq_packed.event_flags_shadow = 0;
+ if (queue_type == ZXDH_VTNET_RQ)
+ vq->vq_packed.cached_flags |= ZXDH_VRING_DESC_F_WRITE;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN);
+ PMD_DRV_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ numa_node, RTE_MEMZONE_IOVA_CONTIG,
+ ZXDH_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ ret = -ENOMEM;
+ goto fail_q_alloc;
+ }
+ }
+
+ memset(mz->addr, 0, mz->len);
+
+ vq->vq_ring_mem = mz->iova;
+ vq->vq_ring_virt_mem = mz->addr;
+
+ zxdh_init_vring(vq);
+
+ if (sz_hdr_mz) {
+ snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
+ dev->data->port_id, vtpci_phy_qidx);
+ hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
+ numa_node, RTE_MEMZONE_IOVA_CONTIG,
+ RTE_CACHE_LINE_SIZE);
+ if (hdr_mz == NULL) {
+ if (rte_errno == EEXIST)
+ hdr_mz = rte_memzone_lookup(vq_hdr_name);
+ if (hdr_mz == NULL) {
+ ret = -ENOMEM;
+ goto fail_q_alloc;
+ }
+ }
+ }
+
+ if (queue_type == ZXDH_VTNET_RQ) {
+ size_t sz_sw = (ZXDH_MBUF_BURST_SZ + vq_size) * sizeof(vq->sw_ring[0]);
+
+ sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, RTE_CACHE_LINE_SIZE, numa_node);
+ if (!sw_ring) {
+ PMD_DRV_LOG(ERR, "can not allocate RX soft ring");
+ ret = -ENOMEM;
+ goto fail_q_alloc;
+ }
+
+ vq->sw_ring = sw_ring;
+ rxvq = &vq->rxq;
+ rxvq->vq = vq;
+ rxvq->port_id = dev->data->port_id;
+ rxvq->mz = mz;
+ } else { /* queue_type == ZXDH_VTNET_TQ */
+ txvq = &vq->txq;
+ txvq->vq = vq;
+ txvq->port_id = dev->data->port_id;
+ txvq->mz = mz;
+ txvq->zxdh_net_hdr_mz = hdr_mz;
+ txvq->zxdh_net_hdr_mem = hdr_mz->iova;
+ }
+
+ vq->offset = offsetof(struct rte_mbuf, buf_iova);
+ if (queue_type == ZXDH_VTNET_TQ) {
+ struct zxdh_tx_region *txr = hdr_mz->addr;
+ uint32_t i;
+
+ memset(txr, 0, vq_size * sizeof(*txr));
+ for (i = 0; i < vq_size; i++) {
+ /* first indirect descriptor is always the tx header */
+ struct zxdh_vring_packed_desc *start_dp = txr[i].tx_packed_indir;
+
+ vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir));
+ start_dp->addr = txvq->zxdh_net_hdr_mem + i * sizeof(*txr) +
+ offsetof(struct zxdh_tx_region, tx_hdr);
+ /* length is updated to the actual PI header size at transmit time */
+ start_dp->len = 0;
+ }
+ }
+ if (ZXDH_VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ PMD_DRV_LOG(ERR, "setup_queue failed");
+ return -EINVAL;
+ }
+ return 0;
+fail_q_alloc:
+ rte_free(sw_ring);
+ rte_memzone_free(hdr_mz);
+ rte_memzone_free(mz);
+ rte_free(vq);
+ return ret;
+}
+
+static int32_t
+zxdh_alloc_queues(struct rte_eth_dev *dev, uint16_t nr_vq)
+{
+ uint16_t lch;
+ struct zxdh_hw *hw = dev->data->dev_private;
+
+ hw->vqs = rte_zmalloc(NULL, sizeof(struct zxdh_virtqueue *) * nr_vq, 0);
+ if (!hw->vqs) {
+ PMD_DRV_LOG(ERR, "Failed to allocate vqs");
+ return -ENOMEM;
+ }
+ for (lch = 0; lch < nr_vq; lch++) {
+ if (zxdh_acquire_channel(dev, lch) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to acquire the channels");
+ zxdh_free_queues(dev);
+ return -1;
+ }
+ if (zxdh_init_queue(dev, lch) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to alloc virtio queue");
+ zxdh_free_queues(dev);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+static int32_t
+zxdh_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint32_t nr_vq = 0;
+ int32_t ret = 0;
+
+ if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
+ PMD_DRV_LOG(ERR, "nb_rx_queues=%d and nb_tx_queues=%d not equal!",
+ dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+ return -EINVAL;
+ }
+ if ((dev->data->nb_rx_queues + dev->data->nb_tx_queues) >= ZXDH_QUEUES_NUM_MAX) {
+ PMD_DRV_LOG(ERR, "nb_rx_queues=%d + nb_tx_queues=%d must < (%d)!",
+ dev->data->nb_rx_queues, dev->data->nb_tx_queues,
+ ZXDH_QUEUES_NUM_MAX);
+ return -EINVAL;
+ }
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_RSS && rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
+ PMD_DRV_LOG(ERR, "Unsupported Rx multi queue mode %d", rxmode->mq_mode);
+ return -EINVAL;
+ }
+
+ if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
+ PMD_DRV_LOG(ERR, "Unsupported Tx multi queue mode %d", txmode->mq_mode);
+ return -EINVAL;
+ }
+
+ ret = zxdh_features_update(hw, rxmode, txmode);
+ if (ret < 0)
+ return ret;
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
+ PMD_DRV_LOG(ERR, "link status not supported by host");
+ return -ENOTSUP;
+ }
+ }
+
+ hw->has_tx_offload = tx_offload_enabled(hw);
+ hw->has_rx_offload = rx_offload_enabled(hw);
+
+ nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues;
+ if (nr_vq == hw->queue_num)
+ return 0;
+
+ PMD_DRV_LOG(DEBUG, "queue changed need reset ");
+ /* Reset the device although not necessary at startup */
+ zxdh_pci_reset(hw);
+
+ /* Tell the host we've noticed this device. */
+ zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
+
+ /* Tell the host we know how to drive the device. */
+ zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER);
+ /* The queues need to be released when reconfiguring */
+ if (hw->vqs != NULL) {
+ zxdh_dev_free_mbufs(dev);
+ zxdh_free_queues(dev);
+ }
+
+ hw->queue_num = nr_vq;
+ ret = zxdh_alloc_queues(dev, nr_vq);
+ if (ret < 0)
+ return ret;
+
+ zxdh_datach_set(dev);
+
+ if (zxdh_configure_intr(dev) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to configure interrupt");
+ zxdh_free_queues(dev);
+ return -1;
+ }
+
+ zxdh_pci_reinit_complete(hw);
+
+ return ret;
+}
+
/* dev_ops for zxdh, bare necessities for basic operation */
static const struct eth_dev_ops zxdh_eth_dev_ops = {
+ .dev_configure = zxdh_dev_configure,
.dev_infos_get = zxdh_dev_infos_get,
};
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 89c5a9bb5f..28e78b0086 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -31,6 +31,13 @@ extern "C" {
#define ZXDH_TX_QUEUES_MAX 128U
#define ZXDH_MIN_RX_BUFSIZE 64
#define ZXDH_MAX_RX_PKTLEN 14000U
+#define ZXDH_QUEUE_DEPTH 1024
+#define ZXDH_QUEUES_BASE 0
+#define ZXDH_TOTAL_QUEUES_NUM 4096
+#define ZXDH_QUEUES_NUM_MAX 256
+#define ZXDH_QUERES_SHARE_BASE (0x5000)
+
+#define ZXDH_MBUF_BURST_SZ 64
union zxdh_virport_num {
uint16_t vport;
@@ -43,6 +50,11 @@ union zxdh_virport_num {
};
};
+struct zxdh_chnl_context {
+ uint16_t valid;
+ uint16_t ph_chno;
+};
+
struct zxdh_hw {
struct rte_eth_dev *eth_dev;
struct zxdh_pci_common_cfg *common_cfg;
@@ -50,6 +62,7 @@ struct zxdh_hw {
struct rte_intr_handle *risc_intr;
struct rte_intr_handle *dtb_intr;
struct zxdh_virtqueue **vqs;
+ struct zxdh_chnl_context channel_context[ZXDH_QUEUES_NUM_MAX];
union zxdh_virport_num vport;
uint64_t bar_addr[ZXDH_NUM_BARS];
@@ -63,6 +76,7 @@ struct zxdh_hw {
uint16_t device_id;
uint16_t port_id;
uint16_t vfid;
+ uint16_t queue_num;
uint8_t *isr;
uint8_t weak_barriers;
@@ -75,6 +89,8 @@ struct zxdh_hw {
uint8_t msg_chan_init;
uint8_t phyport;
uint8_t panel_id;
+ uint8_t has_tx_offload;
+ uint8_t has_rx_offload;
};
uint16_t zxdh_vport_to_vfid(union zxdh_virport_num v);
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 8e7a9c1213..06d3f92b20 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -115,6 +115,93 @@ zxdh_get_isr(struct zxdh_hw *hw)
return rte_read8(hw->isr);
}
+static uint16_t
+zxdh_get_queue_num(struct zxdh_hw *hw, uint16_t queue_id)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
+}
+
+static void
+zxdh_set_queue_num(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ rte_write16(vq_size, &hw->common_cfg->queue_size);
+}
+
+static int32_t
+check_vq_phys_addr_ok(struct zxdh_virtqueue *vq)
+{
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> (ZXDH_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ PMD_DRV_LOG(ERR, "vring address shouldn't be above 16TB!");
+ return 0;
+ }
+ return 1;
+}
+
+static inline void
+io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
+}
+
+static int32_t
+zxdh_setup_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
+{
+ uint64_t desc_addr = 0;
+ uint64_t avail_addr = 0;
+ uint64_t used_addr = 0;
+ uint16_t notify_off = 0;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
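+ /* compute ring component addresses inside the single IOVA-contiguous memzone */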
+ desc_addr = vq->vq_ring_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct zxdh_vring_desc);
+ if (vtpci_packed_queue(vq->hw)) {
+ used_addr = RTE_ALIGN_CEIL((avail_addr +
+ sizeof(struct zxdh_vring_packed_desc_event)),
+ ZXDH_PCI_VRING_ALIGN);
+ } else {
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct zxdh_vring_avail,
+ ring[vq->vq_nentries]), ZXDH_PCI_VRING_ALIGN);
+ }
+
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off); /* default 0 */
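+ /* the reported per-queue offset is ignored; all queues notify at offset 0 */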
+ notify_off = 0;
+ vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+
+ rte_write16(1, &hw->common_cfg->queue_enable);
+
+ return 0;
+}
+
+static void
+zxdh_del_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
const struct zxdh_pci_ops zxdh_dev_pci_ops = {
.read_dev_cfg = zxdh_read_dev_config,
.write_dev_cfg = zxdh_write_dev_config,
@@ -125,6 +212,10 @@ const struct zxdh_pci_ops zxdh_dev_pci_ops = {
.set_queue_irq = zxdh_set_queue_irq,
.set_config_irq = zxdh_set_config_irq,
.get_isr = zxdh_get_isr,
+ .get_queue_num = zxdh_get_queue_num,
+ .set_queue_num = zxdh_set_queue_num,
+ .setup_queue = zxdh_setup_queue,
+ .del_queue = zxdh_del_queue,
};
uint8_t
@@ -154,6 +245,21 @@ zxdh_pci_reset(struct zxdh_hw *hw)
PMD_DRV_LOG(INFO, "port %u device reset %u ms done", hw->port_id, retry);
}
+void
+zxdh_pci_reinit_complete(struct zxdh_hw *hw)
+{
+ zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER_OK);
+}
+
+void
+zxdh_pci_set_status(struct zxdh_hw *hw, uint8_t status)
+{
+ if (status != ZXDH_CONFIG_STATUS_RESET)
+ status |= ZXDH_VTPCI_OPS(hw)->get_status(hw);
+
+ ZXDH_VTPCI_OPS(hw)->set_status(hw, status);
+}
+
static void
*get_cfg_addr(struct rte_pci_device *dev, struct zxdh_pci_cap *cap)
{
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
index 41e47d5d3b..2e7aa9c410 100644
--- a/drivers/net/zxdh/zxdh_pci.h
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -29,7 +29,20 @@ enum zxdh_msix_status {
/* Vector value used to disable MSI for queue. */
#define ZXDH_MSI_NO_VECTOR 0x7F
+#define ZXDH_PCI_VRING_ALIGN 4096
+
+#define ZXDH_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
+#define ZXDH_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define ZXDH_NET_F_MTU 3 /* Initial MTU advice. */
#define ZXDH_NET_F_MAC 5 /* Host has given MAC address. */
+#define ZXDH_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
+#define ZXDH_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
+#define ZXDH_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */
+#define ZXDH_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. */
+
+#define ZXDH_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */
+#define ZXDH_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
+#define ZXDH_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
#define ZXDH_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
#define ZXDH_NET_F_STATUS 16 /* zxdh_net_config.status available */
#define ZXDH_NET_F_MQ 22 /* Device supports Receive Flow Steering */
@@ -53,6 +66,7 @@ enum zxdh_msix_status {
#define ZXDH_CONFIG_STATUS_FEATURES_OK 0x08
#define ZXDH_CONFIG_STATUS_DEV_NEED_RESET 0x40
#define ZXDH_CONFIG_STATUS_FAILED 0x80
+#define ZXDH_PCI_QUEUE_ADDR_SHIFT 12
struct zxdh_net_config {
/* The config defining mac address (if ZXDH_NET_F_MAC) */
@@ -103,11 +117,18 @@ struct zxdh_pci_common_cfg {
uint32_t queue_used_hi; /* read-write */
};
-static inline int32_t vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit)
+static inline int32_t
+vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit)
{
return (hw->guest_features & (1ULL << bit)) != 0;
}
+static inline int32_t
+vtpci_packed_queue(struct zxdh_hw *hw)
+{
+ return vtpci_with_feature(hw, ZXDH_F_RING_PACKED);
+}
+
struct zxdh_pci_ops {
void (*read_dev_cfg)(struct zxdh_hw *hw, size_t offset, void *dst, int32_t len);
void (*write_dev_cfg)(struct zxdh_hw *hw, size_t offset, const void *src, int32_t len);
@@ -120,6 +141,11 @@ struct zxdh_pci_ops {
uint16_t (*set_queue_irq)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq, uint16_t vec);
uint16_t (*set_config_irq)(struct zxdh_hw *hw, uint16_t vec);
uint8_t (*get_isr)(struct zxdh_hw *hw);
+ uint16_t (*get_queue_num)(struct zxdh_hw *hw, uint16_t queue_id);
+ void (*set_queue_num)(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size);
+
+ int32_t (*setup_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq);
+ void (*del_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq);
};
struct zxdh_hw_internal {
@@ -141,6 +167,8 @@ void zxdh_get_pci_dev_config(struct zxdh_hw *hw);
uint16_t zxdh_pci_get_features(struct zxdh_hw *hw);
enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev);
uint8_t zxdh_pci_isr(struct zxdh_hw *hw);
+void zxdh_pci_reinit_complete(struct zxdh_hw *hw);
+void zxdh_pci_set_status(struct zxdh_hw *hw, uint8_t status);
#ifdef __cplusplus
}
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
new file mode 100644
index 0000000000..462a88b23c
--- /dev/null
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 ZTE Corporation
+ */
+
+#include <stdint.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "zxdh_queue.h"
+#include "zxdh_logs.h"
+#include "zxdh_pci.h"
+#include "zxdh_common.h"
+#include "zxdh_msg.h"
+
+struct rte_mbuf *
+zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq)
+{
+ struct rte_mbuf *cookie = NULL;
+ int32_t idx = 0;
+
+ if (vq == NULL)
+ return NULL;
+
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ cookie = vq->vq_descx[idx].cookie;
+ if (cookie != NULL) {
+ vq->vq_descx[idx].cookie = NULL;
+ return cookie;
+ }
+ }
+ return NULL;
+}
+
+static int32_t
+zxdh_release_channel(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t nr_vq = hw->queue_num;
+ uint32_t var = 0;
+ uint32_t addr = 0;
+ uint32_t widx = 0;
+ uint32_t bidx = 0;
+ uint16_t pch = 0;
+ uint16_t lch = 0;
+ int32_t ret = 0;
+
+ ret = zxdh_timedlock(hw, 1000);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Acquiring hw lock got failed, timeout");
+ return -1;
+ }
+
+ for (lch = 0; lch < nr_vq; lch++) {
+ if (hw->channel_context[lch].valid == 0) {
+ PMD_DRV_LOG(DEBUG, "Logic channel %d does not need to release", lch);
+ continue;
+ }
+
+ pch = hw->channel_context[lch].ph_chno;
+ widx = pch / 32;
+ bidx = pch % 32;
+
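+ /* clear this channel's bit in the shared COI bitmap to return it to the free pool */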
+ addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t));
+ var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
+ var &= ~(1 << bidx);
+ zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+
+ hw->channel_context[lch].valid = 0;
+ hw->channel_context[lch].ph_chno = 0;
+ }
+
+ zxdh_release_lock(hw);
+
+ return 0;
+}
+
+int32_t
+zxdh_get_queue_type(uint16_t vtpci_queue_idx)
+{
+ if (vtpci_queue_idx % 2 == 0)
+ return ZXDH_VTNET_RQ;
+ else
+ return ZXDH_VTNET_TQ;
+}
+
+int32_t
+zxdh_free_queues(struct rte_eth_dev *dev)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t nr_vq = hw->queue_num;
+ struct zxdh_virtqueue *vq = NULL;
+ int32_t queue_type = 0;
+ uint16_t i = 0;
+
+ if (hw->vqs == NULL)
+ return 0;
+
+ if (zxdh_release_channel(dev) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to clear coi table");
+ return -1;
+ }
+
+ for (i = 0; i < nr_vq; i++) {
+ vq = hw->vqs[i];
+ if (vq == NULL)
+ continue;
+
+ ZXDH_VTPCI_OPS(hw)->del_queue(hw, vq);
+ queue_type = zxdh_get_queue_type(i);
+ if (queue_type == ZXDH_VTNET_RQ) {
+ rte_free(vq->sw_ring);
+ rte_memzone_free(vq->rxq.mz);
+ } else if (queue_type == ZXDH_VTNET_TQ) {
+ rte_memzone_free(vq->txq.mz);
+ rte_memzone_free(vq->txq.zxdh_net_hdr_mz);
+ }
+
+ rte_free(vq);
+ hw->vqs[i] = NULL;
+ PMD_DRV_LOG(DEBUG, "Release to queue %d success!", i);
+ }
+
+ rte_free(hw->vqs);
+ hw->vqs = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 9c790cd9d3..686cabfef1 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -11,11 +11,30 @@
#include "zxdh_ethdev.h"
#include "zxdh_rxtx.h"
+#include "zxdh_pci.h"
#ifdef __cplusplus
extern "C" {
#endif
+enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
+
+#define ZXDH_VIRTQUEUE_MAX_NAME_SZ 32
+#define ZXDH_RQ_QUEUE_IDX 0
+#define ZXDH_TQ_QUEUE_IDX 1
+#define ZXDH_MAX_TX_INDIRECT 8
+
+/* This marks a buffer as write-only (otherwise read-only). */
+#define ZXDH_VRING_DESC_F_WRITE 2
+/* This flag means the descriptor was made available by the driver */
+#define ZXDH_VRING_PACKED_DESC_F_AVAIL (1 << (7))
+
+#define ZXDH_RING_EVENT_FLAGS_ENABLE 0x0
+#define ZXDH_RING_EVENT_FLAGS_DISABLE 0x1
+#define ZXDH_RING_EVENT_FLAGS_DESC 0x2
+
+#define ZXDH_VQ_RING_DESC_CHAIN_END 32768
+
/*
* ring descriptors: 16 bytes.
* These can chain together via "next".
@@ -27,6 +46,19 @@ struct zxdh_vring_desc {
uint16_t next; /* We chain unused descriptors via this. */
} __rte_packed;
+struct zxdh_vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct zxdh_vring_used {
+ uint16_t flags;
+ uint16_t idx;
+ struct zxdh_vring_used_elem ring[];
+} __rte_packed;
+
struct zxdh_vring_avail {
uint16_t flags;
uint16_t idx;
@@ -103,6 +135,149 @@ struct zxdh_virtqueue {
struct zxdh_vq_desc_extra vq_descx[];
} __rte_packed;
+struct zxdh_type_hdr {
+ uint8_t port; /* bit[0:1] 00-np 01-DRS 10-DTP */
+ uint8_t pd_len;
+ uint8_t num_buffers;
+ uint8_t reserved;
+} __rte_packed; /* 4B */
+
+struct zxdh_pi_hdr {
+ uint8_t pi_len;
+ uint8_t pkt_type;
+ uint16_t vlan_id;
+ uint32_t ipv6_extend;
+ uint16_t l3_offset;
+ uint16_t l4_offset;
+ uint8_t phy_port;
+ uint8_t pkt_flag_hi8;
+ uint16_t pkt_flag_lw16;
+ union {
+ struct {
+ uint64_t sa_idx;
+ uint8_t reserved_8[8];
+ } dl;
+ struct {
+ uint32_t lro_flag;
+ uint32_t lro_mss;
+ uint16_t err_code;
+ uint16_t pm_id;
+ uint16_t pkt_len;
+ uint8_t reserved[2];
+ } ul;
+ };
+} __rte_packed; /* 32B */
+
+struct zxdh_pd_hdr_dl {
+ uint32_t ol_flag;
+ uint8_t tag_idx;
+ uint8_t tag_data;
+ uint16_t dst_vfid;
+ uint32_t svlan_insert;
+ uint32_t cvlan_insert;
+} __rte_packed; /* 16B */
+
+struct zxdh_net_hdr_dl {
+ struct zxdh_type_hdr type_hdr; /* 4B */
+ struct zxdh_pi_hdr pi_hdr; /* 32B */
+ struct zxdh_pd_hdr_dl pd_hdr; /* 16B */
+} __rte_packed;
+
+struct zxdh_pd_hdr_ul {
+ uint32_t pkt_flag;
+ uint32_t rss_hash;
+ uint32_t fd;
+ uint32_t striped_vlan_tci;
+ uint8_t tag_idx;
+ uint8_t tag_data;
+ uint16_t src_vfid;
+ uint16_t pkt_type_out;
+ uint16_t pkt_type_in;
+} __rte_packed; /* 24B */
+
+struct zxdh_net_hdr_ul {
+ struct zxdh_type_hdr type_hdr; /* 4B */
+ struct zxdh_pi_hdr pi_hdr; /* 32B */
+ struct zxdh_pd_hdr_ul pd_hdr; /* 24B */
+} __rte_packed; /* 60B */
+
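+/* Per-packet transmit region: the DL net header plus an indirect descriptor table. */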
+struct zxdh_tx_region {
+ struct zxdh_net_hdr_dl tx_hdr;
+ union {
+ struct zxdh_vring_desc tx_indir[ZXDH_MAX_TX_INDIRECT];
+ struct zxdh_vring_packed_desc tx_packed_indir[ZXDH_MAX_TX_INDIRECT];
+ } __rte_packed;
+};
+
+static inline size_t
+vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align)
+{
+ size_t size;
+
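+ /* packed ring: descriptor array plus driver and device event suppression structures */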
+ if (vtpci_packed_queue(hw)) {
+ size = num * sizeof(struct zxdh_vring_packed_desc);
+ size += sizeof(struct zxdh_vring_packed_desc_event);
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct zxdh_vring_packed_desc_event);
+ return size;
+ }
+
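+ /* split ring: desc table and avail ring, then the used ring on the next aligned boundary */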
+ size = num * sizeof(struct zxdh_vring_desc);
+ size += sizeof(struct zxdh_vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct zxdh_vring_used) + (num * sizeof(struct zxdh_vring_used_elem));
+ return size;
+}
+
+static inline void
+vring_init_packed(struct zxdh_vring_packed *vr, uint8_t *p,
+ unsigned long align, uint32_t num)
+{
+ vr->num = num;
+ vr->desc = (struct zxdh_vring_packed_desc *)p;
+ vr->driver = (struct zxdh_vring_packed_desc_event *)(p +
+ vr->num * sizeof(struct zxdh_vring_packed_desc));
+ vr->device = (struct zxdh_vring_packed_desc_event *)RTE_ALIGN_CEIL(((uintptr_t)vr->driver +
+ sizeof(struct zxdh_vring_packed_desc_event)), align);
+}
+
+static inline void
+vring_desc_init_packed(struct zxdh_virtqueue *vq, int32_t n)
+{
+ int32_t i = 0;
+
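+ /* chain all descriptors; the last entry terminates with ZXDH_VQ_RING_DESC_CHAIN_END */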
+ for (i = 0; i < n - 1; i++) {
+ vq->vq_packed.ring.desc[i].id = i;
+ vq->vq_descx[i].next = i + 1;
+ }
+ vq->vq_packed.ring.desc[i].id = i;
+ vq->vq_descx[i].next = ZXDH_VQ_RING_DESC_CHAIN_END;
+}
+
+static inline void
+vring_desc_init_indirect_packed(struct zxdh_vring_packed_desc *dp, int32_t n)
+{
+ int32_t i = 0;
+
+ for (i = 0; i < n; i++) {
+ dp[i].id = (uint16_t)i;
+ dp[i].flags = ZXDH_VRING_DESC_F_WRITE;
+ }
+}
+
+static inline void
+virtqueue_disable_intr(struct zxdh_virtqueue *vq)
+{
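+ /* write DISABLE into the driver event flags to suppress device notifications */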
+ if (vq->vq_packed.event_flags_shadow != ZXDH_RING_EVENT_FLAGS_DISABLE) {
+ vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_DISABLE;
+ vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow;
+ }
+}
+
+struct rte_mbuf *zxdh_virtqueue_detach_unused(struct zxdh_virtqueue *vq);
+int32_t zxdh_free_queues(struct rte_eth_dev *dev);
+int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
+
#ifdef __cplusplus
}
#endif
--
2.27.0
Thread overview: 108+ messages
2024-09-10 12:00 [PATCH v4] net/zxdh: Provided zxdh basic init Junlong Wang
2024-09-24 1:35 ` [v4] " Junlong Wang
2024-09-25 22:39 ` [PATCH v4] " Ferruh Yigit
2024-09-26 6:49 ` [v4] " Junlong Wang
2024-10-07 21:43 ` [PATCH v4] " Stephen Hemminger
2024-10-15 5:43 ` [PATCH v5 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-15 5:43 ` [PATCH v5 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-15 5:44 ` [PATCH v5 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-15 5:44 ` [PATCH v5 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-15 5:44 ` [PATCH v5 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-15 5:44 ` [PATCH v5 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-15 5:44 ` [PATCH v5 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-15 5:44 ` [PATCH v5 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-15 5:44 ` [PATCH v5 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-15 5:44 ` [PATCH v5 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-15 15:37 ` Stephen Hemminger
2024-10-15 15:57 ` Stephen Hemminger
2024-10-16 8:16 ` [PATCH v6 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-16 8:16 ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-16 8:18 ` [PATCH v6 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-16 8:18 ` [PATCH v6 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-16 8:18 ` [PATCH v6 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-16 8:18 ` [PATCH v6 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-21 8:50 ` Thomas Monjalon
2024-10-21 10:56 ` Junlong Wang
2024-10-16 8:18 ` [PATCH v6 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-21 8:52 ` Thomas Monjalon
2024-10-16 8:18 ` [PATCH v6 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-16 8:18 ` [PATCH v6 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-21 8:54 ` Thomas Monjalon
2024-10-16 8:18 ` [PATCH v6 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-18 5:18 ` [v6,9/9] " Junlong Wang
2024-10-18 6:48 ` David Marchand
2024-10-19 11:17 ` Junlong Wang
2024-10-21 9:03 ` [PATCH v6 1/9] net/zxdh: add zxdh ethdev pmd driver Thomas Monjalon
2024-10-22 12:20 ` [PATCH v7 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-22 12:20 ` [PATCH v7 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-10-30 9:01 ` [PATCH v8 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-10-30 9:01 ` [PATCH v8 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-01 6:21 ` [PATCH v9 0/9] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-01 6:21 ` [PATCH v9 1/9] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-02 0:57 ` Ferruh Yigit
2024-11-04 11:58 ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-04 11:58 ` [PATCH v10 01/10] net/zxdh: add zxdh ethdev pmd driver Junlong Wang
2024-11-07 10:32 ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Junlong Wang
2024-11-04 11:58 ` [PATCH v10 02/10] net/zxdh: add logging implementation Junlong Wang
2024-11-04 11:58 ` [PATCH v10 03/10] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-11-04 11:58 ` [PATCH v10 04/10] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-11-04 11:58 ` [PATCH v10 05/10] net/zxdh: add msg chan enable implementation Junlong Wang
2024-11-04 11:58 ` [PATCH v10 06/10] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-11-04 11:58 ` [PATCH v10 07/10] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-11-04 11:58 ` [PATCH v10 08/10] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-11-04 11:58 ` Junlong Wang [this message]
2024-11-04 11:58 ` [PATCH v10 10/10] net/zxdh: add zxdh dev close ops Junlong Wang
2024-11-06 0:40 ` [PATCH v10 00/10] net/zxdh: introduce net zxdh driver Ferruh Yigit
2024-11-07 9:28 ` Ferruh Yigit
2024-11-07 9:58 ` Ferruh Yigit
2024-11-01 6:21 ` [PATCH v9 2/9] net/zxdh: add logging implementation Junlong Wang
2024-11-02 1:02 ` Ferruh Yigit
2024-11-04 2:44 ` [v9,2/9] " Junlong Wang
2024-11-01 6:21 ` [PATCH v9 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-11-02 1:01 ` Ferruh Yigit
2024-11-01 6:21 ` [PATCH v9 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-11-02 1:00 ` Ferruh Yigit
2024-11-04 2:47 ` Junlong Wang
2024-11-01 6:21 ` [PATCH v9 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-11-01 6:21 ` [PATCH v9 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-11-02 1:06 ` Ferruh Yigit
2024-11-04 3:30 ` [v9,6/9] " Junlong Wang
2024-11-01 6:21 ` [PATCH v9 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-11-02 1:07 ` Ferruh Yigit
2024-11-01 6:21 ` [PATCH v9 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-11-01 6:21 ` [PATCH v9 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-11-02 0:56 ` [PATCH v9 0/9] net/zxdh: introduce net zxdh driver Ferruh Yigit
2024-11-04 2:42 ` Junlong Wang
2024-11-04 8:46 ` Ferruh Yigit
2024-11-04 9:52 ` David Marchand
2024-11-04 11:46 ` Junlong Wang
2024-11-04 22:47 ` Thomas Monjalon
2024-11-05 9:39 ` Junlong Wang
2024-11-06 0:38 ` Ferruh Yigit
2024-10-30 9:01 ` [PATCH v8 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-30 9:01 ` [PATCH v8 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-30 14:55 ` David Marchand
2024-10-30 9:01 ` [PATCH v8 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-30 9:01 ` [PATCH v8 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-30 9:01 ` [PATCH v8 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-30 9:01 ` [PATCH v8 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-30 9:01 ` [PATCH v8 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-30 9:01 ` [PATCH v8 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-22 12:20 ` [PATCH v7 2/9] net/zxdh: add logging implementation Junlong Wang
2024-10-22 12:20 ` [PATCH v7 3/9] net/zxdh: add zxdh device pci init implementation Junlong Wang
2024-10-27 16:47 ` Stephen Hemminger
2024-10-27 16:47 ` Stephen Hemminger
2024-10-22 12:20 ` [PATCH v7 4/9] net/zxdh: add msg chan and msg hwlock init Junlong Wang
2024-10-22 12:20 ` [PATCH v7 5/9] net/zxdh: add msg chan enable implementation Junlong Wang
2024-10-26 17:05 ` Thomas Monjalon
2024-10-22 12:20 ` [PATCH v7 6/9] net/zxdh: add zxdh get device backend infos Junlong Wang
2024-10-22 12:20 ` [PATCH v7 7/9] net/zxdh: add configure zxdh intr implementation Junlong Wang
2024-10-27 17:07 ` Stephen Hemminger
2024-10-22 12:20 ` [PATCH v7 8/9] net/zxdh: add zxdh dev infos get ops Junlong Wang
2024-10-22 12:20 ` [PATCH v7 9/9] net/zxdh: add zxdh dev configure ops Junlong Wang
2024-10-24 11:31 ` [v7,9/9] " Junlong Wang
2024-10-25 9:48 ` Junlong Wang
2024-10-26 2:32 ` Junlong Wang
2024-10-27 16:40 ` [PATCH v7 9/9] " Stephen Hemminger
2024-10-27 17:03 ` Stephen Hemminger
2024-10-27 16:58 ` Stephen Hemminger