provided zxdh dev configure ops for queue check,reset,alloc resources,etc. Signed-off-by: Junlong Wang --- drivers/net/zxdh/meson.build | 1 + drivers/net/zxdh/zxdh_common.c | 119 +++++++++ drivers/net/zxdh/zxdh_common.h | 12 + drivers/net/zxdh/zxdh_ethdev.c | 457 +++++++++++++++++++++++++++++++++ drivers/net/zxdh/zxdh_ethdev.h | 20 +- drivers/net/zxdh/zxdh_pci.c | 97 +++++++ drivers/net/zxdh/zxdh_pci.h | 31 +++ drivers/net/zxdh/zxdh_queue.c | 131 ++++++++++ drivers/net/zxdh/zxdh_queue.h | 176 +++++++++++++ drivers/net/zxdh/zxdh_rxtx.h | 4 + 10 files changed, 1045 insertions(+), 3 deletions(-) create mode 100644 drivers/net/zxdh/zxdh_queue.c diff --git a/drivers/net/zxdh/meson.build b/drivers/net/zxdh/meson.build index a16db47f89..b96aa5a27e 100644 --- a/drivers/net/zxdh/meson.build +++ b/drivers/net/zxdh/meson.build @@ -18,4 +18,5 @@ sources = files( 'zxdh_pci.c', 'zxdh_msg.c', 'zxdh_common.c', + 'zxdh_queue.c', ) diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c index 61993980c3..ed4396a6af 100644 --- a/drivers/net/zxdh/zxdh_common.c +++ b/drivers/net/zxdh/zxdh_common.c @@ -20,6 +20,7 @@ #define ZXDH_COMMON_TABLE_WRITE 1 #define ZXDH_COMMON_FIELD_PHYPORT 6 +#define ZXDH_COMMON_FIELD_DATACH 3 #define RSC_TBL_CONTENT_LEN_MAX (257 * 2) @@ -247,3 +248,121 @@ int32_t zxdh_pannelid_get(struct rte_eth_dev *dev, uint8_t *pannelid) int32_t ret = zxdh_get_res_panel_id(¶m, pannelid); return ret; } + +uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]); + uint32_t val = *((volatile uint32_t *)(baseaddr + reg)); + return val; +} + +void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint64_t baseaddr = (uint64_t)(hw->bar_addr[bar]); + *((volatile uint32_t *)(baseaddr + reg)) = val; +} + +int32_t zxdh_acquire_lock(struct zxdh_hw *hw) +{ + uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG); + + /* check whether lock is used */ + if (!(var & ZXDH_VF_LOCK_ENABLE_MASK)) + return -1; + + return 0; +} + +int32_t zxdh_release_lock(struct zxdh_hw *hw) +{ + uint32_t var = zxdh_read_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG); + + if (var & ZXDH_VF_LOCK_ENABLE_MASK) { + var &= ~ZXDH_VF_LOCK_ENABLE_MASK; + zxdh_write_comm_reg((uint64_t)hw->common_cfg, ZXDH_VF_LOCK_REG, var); + return 0; + } + + return -1; +} + +uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg) +{ + uint32_t val = *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)); + return val; +} + +void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val) +{ + *((volatile uint32_t *)(pci_comm_cfg_baseaddr + reg)) = val; +} + +static int32_t zxdh_common_table_write(struct zxdh_hw *hw, uint8_t field, + void *buff, uint16_t buff_size) +{ + struct zxdh_pci_bar_msg desc; + struct zxdh_msg_recviver_mem msg_rsp; + int32_t ret = 0; + + if (!hw->msg_chan_init) { + PMD_DRV_LOG(ERR, "Bar messages channel not initialized"); + return -1; + } + if (buff_size != 0 && buff == NULL) { + PMD_DRV_LOG(ERR, "Buff is invalid"); + return -1; + } + + ret = zxdh_fill_common_msg(hw, &desc, ZXDH_COMMON_TABLE_WRITE, + field, buff, buff_size); + + if (ret != 0) { + PMD_DRV_LOG(ERR, "Failed to fill common msg"); + return ret; + } + + ret = zxdh_send_command(hw, &desc, BAR_MODULE_TBL, &msg_rsp); + if (ret != 0) + goto free_msg_data; + + ret = 
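+	/* zxdh_common_rsp_check() validates the status echoed back by the firmware */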
zxdh_common_rsp_check(&msg_rsp, NULL, 0);
+	if (ret != 0)
+		goto free_rsp_data;
+
+free_rsp_data:
+	rte_free(msg_rsp.recv_buffer);
+free_msg_data:
+	rte_free(desc.payload_addr);
+	return ret;
+}
+
+int32_t zxdh_datach_set(struct rte_eth_dev *dev)
+{
+	/* payload: queue_num (2 bytes) + pch1 (2 bytes) + ... + pchn */
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t buff_size = (hw->queue_num + 1) * 2;
+	void *buff = rte_zmalloc(NULL, buff_size, 0);
+
+	if (unlikely(buff == NULL)) {
+		PMD_DRV_LOG(ERR, "Failed to allocate buff");
+		return -ENOMEM;
+	}
+	memset(buff, 0, buff_size);
+	uint16_t *pdata = (uint16_t *)buff;
+	*pdata++ = hw->queue_num;
+	uint16_t i;
+
+	for (i = 0; i < hw->queue_num; i++)
+		*(pdata + i) = hw->channel_context[i].ph_chno;
+
+	int32_t ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH,
+			(void *)buff, buff_size);
+
+	if (ret != 0)
+		PMD_DRV_LOG(ERR, "Failed to setup data channel of common table");
+
+	rte_free(buff);
+	return ret;
+}
diff --git a/drivers/net/zxdh/zxdh_common.h b/drivers/net/zxdh/zxdh_common.h
index ec7011e820..24bbc7fee0 100644
--- a/drivers/net/zxdh/zxdh_common.h
+++ b/drivers/net/zxdh/zxdh_common.h
@@ -14,6 +14,10 @@ extern "C" {
 
 #include "zxdh_ethdev.h"
 
+#define ZXDH_VF_LOCK_REG 0x90
+#define ZXDH_VF_LOCK_ENABLE_MASK 0x1
+#define ZXDH_ACQUIRE_CHANNEL_NUM_MAX 10
+
 struct zxdh_res_para {
 	uint64_t virt_addr;
 	uint16_t pcie_id;
@@ -23,6 +27,14 @@ struct zxdh_res_para {
 int32_t zxdh_phyport_get(struct rte_eth_dev *dev, uint8_t *phyport);
 int32_t zxdh_pannelid_get(struct rte_eth_dev *dev, uint8_t *pannelid);
 
+uint32_t zxdh_read_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg);
+void zxdh_write_bar_reg(struct rte_eth_dev *dev, uint32_t bar, uint32_t reg, uint32_t val);
+int32_t zxdh_release_lock(struct zxdh_hw *hw);
+int32_t zxdh_acquire_lock(struct zxdh_hw *hw);
+uint32_t zxdh_read_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg);
+void zxdh_write_comm_reg(uint64_t pci_comm_cfg_baseaddr, uint32_t reg, uint32_t val);
+int32_t zxdh_datach_set(struct rte_eth_dev *dev);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 65b649a156..a1997facdb 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -11,6 +11,7 @@
 #include "zxdh_pci.h"
 #include "zxdh_msg.h"
 #include "zxdh_common.h"
+#include "zxdh_queue.h"
 
 #define ZXDH_MIN_RX_BUFSIZE 64
 #define ZXDH_MAX_RX_PKTLEN 14000U
@@ -376,8 +377,464 @@ static int32_t zxdh_configure_intr(struct rte_eth_dev *dev)
 	return ret;
 }
 
+static int32_t zxdh_features_update(struct zxdh_hw *hw,
+		const struct rte_eth_rxmode *rxmode,
+		const struct rte_eth_txmode *txmode)
+{
+	uint64_t rx_offloads = rxmode->offloads;
+	uint64_t tx_offloads = txmode->offloads;
+	uint64_t req_features = hw->guest_features;
+
+	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
+		req_features |= (1ULL << ZXDH_NET_F_GUEST_CSUM);
+
+	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+		req_features |= (1ULL << ZXDH_NET_F_GUEST_TSO4) |
+				(1ULL << ZXDH_NET_F_GUEST_TSO6);
+
+	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
+		req_features |= (1ULL << ZXDH_NET_F_CSUM);
+
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
+		req_features |= (1ULL << ZXDH_NET_F_HOST_TSO4) |
+				(1ULL << ZXDH_NET_F_HOST_TSO6);
+
+	if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_TSO)
+		req_features |= (1ULL << ZXDH_NET_F_HOST_UFO);
+
+	req_features = req_features & hw->host_features;
+	hw->guest_features = req_features;
+
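+	/* commit the negotiated feature set to the device */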
+	VTPCI_OPS(hw)->set_features(hw, req_features);
+
+	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
+		!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM)) {
+		PMD_DRV_LOG(ERR, "rx checksum not available on this host");
+		return -ENOTSUP;
+	}
+
+	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
+		(!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+		!vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6))) {
+		PMD_DRV_LOG(ERR, "Large Receive Offload not available on this host");
+		return -ENOTSUP;
+	}
+	return 0;
+}
+
+static bool rx_offload_enabled(struct zxdh_hw *hw)
+{
+	return vtpci_with_feature(hw, ZXDH_NET_F_GUEST_CSUM) ||
+		vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO4) ||
+		vtpci_with_feature(hw, ZXDH_NET_F_GUEST_TSO6);
+}
+
+static bool tx_offload_enabled(struct zxdh_hw *hw)
+{
+	return vtpci_with_feature(hw, ZXDH_NET_F_CSUM) ||
+		vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO4) ||
+		vtpci_with_feature(hw, ZXDH_NET_F_HOST_TSO6) ||
+		vtpci_with_feature(hw, ZXDH_NET_F_HOST_UFO);
+}
+
+static void zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t nr_vq = hw->queue_num;
+	uint32_t i = 0;
+
+	const char *type = NULL;
+	struct virtqueue *vq = NULL;
+	struct rte_mbuf *buf = NULL;
+	int32_t queue_type = 0;
+
+	if (hw->vqs == NULL)
+		return;
+
+	for (i = 0; i < nr_vq; i++) {
+		vq = hw->vqs[i];
+		if (!vq)
+			continue;
+
+		queue_type = zxdh_get_queue_type(i);
+		if (queue_type == VTNET_RQ)
+			type = "rxq";
+		else if (queue_type == VTNET_TQ)
+			type = "txq";
+		else
+			continue;
+		PMD_INIT_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);
+
+		while ((buf = zxdh_virtqueue_detach_unused(vq)) != NULL)
+			rte_pktmbuf_free(buf);
+	}
+}
+
+static int32_t zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t base = (queue_type == VTNET_RQ) ? 0 : 1;
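+	/* even channel numbers serve RX, odd numbers TX; the scan below steps by 2 */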
+	uint16_t i = 0;
+	uint16_t j = 0;
+	uint16_t done = 0;
+	uint16_t timeout = 0;
+
+	while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
+		rte_delay_us_block(1000);
+		/* acquire hw lock */
+		if (zxdh_acquire_lock(hw) < 0) {
+			PMD_INIT_LOG(ERR, "Failed to acquire hw lock, timeout: %d", timeout);
+			continue;
+		}
+		/* Iterate COI table and find a free channel */
+		for (i = ZXDH_QUEUES_BASE / 32; i < ZXDH_TOTAL_QUEUES_NUM / 32; i++) {
+			uint32_t addr = ZXDH_QUERES_SHARE_BASE + (i * sizeof(uint32_t));
+			uint32_t var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
+
+			for (j = base; j < 32; j += 2) {
+				/* Got an available channel & update COI table */
+				if ((var & (1 << j)) == 0) {
+					var |= (1 << j);
+					zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+					done = 1;
+					break;
+				}
+			}
+			if (done)
+				break;
+		}
+		break;
+	}
+	if (timeout >= ZXDH_ACQUIRE_CHANNEL_NUM_MAX) {
+		PMD_INIT_LOG(ERR, "Failed to acquire channel");
+		return -1;
+	}
+	zxdh_release_lock(hw);
+	/* check for no channel condition */
+	if (done != 1) {
+		PMD_INIT_LOG(ERR, "No available queues");
+		return -1;
+	}
+	/* return available channel ID */
+	return (i * 32) + j;
+}
+
+static int32_t zxdh_acquire_channel(struct rte_eth_dev *dev, uint16_t lch)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	if (hw->channel_context[lch].valid == 1) {
+		PMD_INIT_LOG(DEBUG, "Logic channel:%u already mapped to physical channel:%u",
+			lch, hw->channel_context[lch].ph_chno);
+		return hw->channel_context[lch].ph_chno;
+	}
+	int32_t pch = zxdh_get_available_channel(dev, zxdh_get_queue_type(lch));
+
+	if (pch < 0) {
+		PMD_INIT_LOG(ERR, "Failed to acquire channel");
+		return -1;
+	}
+	hw->channel_context[lch].ph_chno = (uint16_t)pch;
+	hw->channel_context[lch].valid = 1;
+	PMD_INIT_LOG(DEBUG, "Acquired channel lch:%u --> pch:%d", lch, pch);
+	return 0;
+}
+
+static void zxdh_init_vring(struct virtqueue *vq)
+{
+	int32_t size = vq->vq_nentries;
+	uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+	memset(ring_mem, 0, vq->vq_ring_size);
+
+	vq->vq_used_cons_idx = 0;
+	vq->vq_desc_head_idx = 0;
+	vq->vq_avail_idx = 0;
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+	vring_init_packed(&vq->vq_packed.ring, ring_mem, ZXDH_PCI_VRING_ALIGN, size);
+	vring_desc_init_packed(vq, size);
+	virtqueue_disable_intr(vq);
+}
+
+static int32_t zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
+{
+	char vq_name[VIRTQUEUE_MAX_NAME_SZ] = {0};
+	char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ] = {0};
+	const struct rte_memzone *mz = NULL;
+	const struct rte_memzone *hdr_mz = NULL;
+	uint32_t size = 0;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct virtnet_rx *rxvq = NULL;
+	struct virtnet_tx *txvq = NULL;
+	struct virtqueue *vq = NULL;
+	size_t sz_hdr_mz = 0;
+	void *sw_ring = NULL;
+	int32_t queue_type = zxdh_get_queue_type(vtpci_logic_qidx);
+	int32_t numa_node = dev->device->numa_node;
+	uint16_t vtpci_phy_qidx = 0;
+	uint32_t vq_size = 0;
+	int32_t ret = 0;
+
+	if (hw->channel_context[vtpci_logic_qidx].valid == 0) {
+		PMD_INIT_LOG(ERR, "lch %d is invalid", vtpci_logic_qidx);
+		return -EINVAL;
+	}
+	vtpci_phy_qidx = hw->channel_context[vtpci_logic_qidx].ph_chno;
+
+	PMD_INIT_LOG(DEBUG, "vtpci_logic_qidx:%d setting up physical queue:%u on NUMA node %d",
+		vtpci_logic_qidx, vtpci_phy_qidx, numa_node);
+
+	vq_size = ZXDH_QUEUE_DEPTH;
+
+	if (VTPCI_OPS(hw)->set_queue_num != NULL)
+		VTPCI_OPS(hw)->set_queue_num(hw, 
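+			/* ring depth is fixed at ZXDH_QUEUE_DEPTH (1024) for every queue */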
vtpci_phy_qidx, vq_size); + + snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, vtpci_phy_qidx); + + size = RTE_ALIGN_CEIL(sizeof(*vq) + vq_size * sizeof(struct vq_desc_extra), + RTE_CACHE_LINE_SIZE); + if (queue_type == VTNET_TQ) { + /* + * For each xmit packet, allocate a zxdh_net_hdr + * and indirect ring elements + */ + sz_hdr_mz = vq_size * sizeof(struct zxdh_tx_region); + } + + vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, numa_node); + if (vq == NULL) { + PMD_INIT_LOG(ERR, "can not allocate vq"); + return -ENOMEM; + } + hw->vqs[vtpci_logic_qidx] = vq; + + vq->hw = hw; + vq->vq_queue_index = vtpci_phy_qidx; + vq->vq_nentries = vq_size; + + vq->vq_packed.used_wrap_counter = 1; + vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL; + vq->vq_packed.event_flags_shadow = 0; + if (queue_type == VTNET_RQ) + vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE; + + /* + * Reserve a memzone for vring elements + */ + size = vring_size(hw, vq_size, ZXDH_PCI_VRING_ALIGN); + vq->vq_ring_size = RTE_ALIGN_CEIL(size, ZXDH_PCI_VRING_ALIGN); + PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size); + + mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, + numa_node, RTE_MEMZONE_IOVA_CONTIG, + ZXDH_PCI_VRING_ALIGN); + if (mz == NULL) { + if (rte_errno == EEXIST) + mz = rte_memzone_lookup(vq_name); + if (mz == NULL) { + ret = -ENOMEM; + goto fail_q_alloc; + } + } + + memset(mz->addr, 0, mz->len); + + vq->vq_ring_mem = mz->iova; + vq->vq_ring_virt_mem = mz->addr; + + zxdh_init_vring(vq); + + if (sz_hdr_mz) { + snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr", + dev->data->port_id, vtpci_phy_qidx); + hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz, + numa_node, RTE_MEMZONE_IOVA_CONTIG, + RTE_CACHE_LINE_SIZE); + if (hdr_mz == NULL) { + if (rte_errno == EEXIST) + hdr_mz = rte_memzone_lookup(vq_hdr_name); + if (hdr_mz == NULL) { + ret = -ENOMEM; + goto fail_q_alloc; + } + } + } + + if (queue_type == VTNET_RQ) { + size_t sz_sw = (ZXDH_MBUF_BURST_SZ + vq_size) * sizeof(vq->sw_ring[0]); + + sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, RTE_CACHE_LINE_SIZE, numa_node); + if (!sw_ring) { + PMD_INIT_LOG(ERR, "can not allocate RX soft ring"); + ret = -ENOMEM; + goto fail_q_alloc; + } + + vq->sw_ring = sw_ring; + rxvq = &vq->rxq; + rxvq->vq = vq; + rxvq->port_id = dev->data->port_id; + rxvq->mz = mz; + } else { + txvq = &vq->txq; + txvq->vq = vq; + txvq->port_id = dev->data->port_id; + txvq->mz = mz; + txvq->virtio_net_hdr_mz = hdr_mz; + txvq->virtio_net_hdr_mem = hdr_mz->iova; + } + + vq->offset = offsetof(struct rte_mbuf, buf_iova); + if (queue_type == VTNET_TQ) { + struct zxdh_tx_region *txr = hdr_mz->addr; + uint32_t i; + + memset(txr, 0, vq_size * sizeof(*txr)); + for (i = 0; i < vq_size; i++) { + /* first indirect descriptor is always the tx header */ + struct vring_packed_desc *start_dp = txr[i].tx_packed_indir; + + vring_desc_init_indirect_packed(start_dp, RTE_DIM(txr[i].tx_packed_indir)); + start_dp->addr = txvq->virtio_net_hdr_mem + i * sizeof(*txr) + + offsetof(struct zxdh_tx_region, tx_hdr); + /* length will be updated to actual pi hdr size when xmit pkt */ + start_dp->len = 0; + } + } + if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) { + PMD_INIT_LOG(ERR, "setup_queue failed"); + return -EINVAL; + } + return 0; +fail_q_alloc: + rte_free(sw_ring); + rte_memzone_free(hdr_mz); + rte_memzone_free(mz); + rte_free(vq); + return ret; +} + +static int32_t zxdh_alloc_queues(struct rte_eth_dev *dev, uint16_t nr_vq) +{ + 
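+	/* one virtqueue pointer per logical channel, released again in zxdh_free_queues() */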
uint16_t lch;
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
+	if (!hw->vqs) {
+		PMD_INIT_LOG(ERR, "Failed to allocate vqs");
+		return -ENOMEM;
+	}
+	for (lch = 0; lch < nr_vq; lch++) {
+		if (zxdh_acquire_channel(dev, lch) < 0) {
+			PMD_INIT_LOG(ERR, "Failed to acquire the channels");
+			zxdh_free_queues(dev);
+			return -1;
+		}
+		if (zxdh_init_queue(dev, lch) < 0) {
+			PMD_INIT_LOG(ERR, "Failed to alloc virtio queue");
+			zxdh_free_queues(dev);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int32_t zxdh_dev_configure(struct rte_eth_dev *dev)
+{
+	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint32_t nr_vq = 0;
+	int32_t ret = 0;
+
+	if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
+		PMD_INIT_LOG(ERR, "nb_rx_queues=%d and nb_tx_queues=%d not equal!",
+			dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+		return -EINVAL;
+	}
+	if ((dev->data->nb_rx_queues + dev->data->nb_tx_queues) >= ZXDH_QUEUES_NUM_MAX) {
+		PMD_INIT_LOG(ERR, "nb_rx_queues=%d + nb_tx_queues=%d must < (%d)!",
+			dev->data->nb_rx_queues, dev->data->nb_tx_queues,
+			ZXDH_QUEUES_NUM_MAX);
+		return -EINVAL;
+	}
+	if (rxmode->mq_mode != RTE_ETH_MQ_RX_RSS && rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
+		PMD_DRV_LOG(ERR, "Unsupported Rx multi queue mode %d", rxmode->mq_mode);
+		return -EINVAL;
+	}
+
+	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
+		PMD_DRV_LOG(ERR, "Unsupported Tx multi queue mode %d", txmode->mq_mode);
+		return -EINVAL;
+	}
+
+	ret = zxdh_features_update(hw, rxmode, txmode);
+	if (ret < 0)
+		return ret;
+
+	/* check if lsc interrupt feature is enabled */
+	if (dev->data->dev_conf.intr_conf.lsc) {
+		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
+			PMD_DRV_LOG(ERR, "link status not supported by host");
+			return -ENOTSUP;
+		}
+	}
+
+	hw->has_tx_offload = tx_offload_enabled(hw);
+	hw->has_rx_offload = rx_offload_enabled(hw);
+
+	nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues;
+	if (nr_vq == hw->queue_num)
+		return 0;
+
+	PMD_DRV_LOG(DEBUG, "queue number changed, need reset");
+	/* Reset the device although not necessary at startup */
+	zxdh_vtpci_reset(hw);
+
+	/* Tell the host we've noticed this device. */
+	zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
+
+	/* Tell the host we've known how to drive the device. 
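Status steps ACK -> DRIVER here; DRIVER_OK is set later by zxdh_vtpci_reinit_complete(). 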
*/ + zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER); + /* The queue needs to be released when reconfiguring*/ + if (hw->vqs != NULL) { + zxdh_dev_free_mbufs(dev); + zxdh_free_queues(dev); + } + + hw->queue_num = nr_vq; + ret = zxdh_alloc_queues(dev, nr_vq); + if (ret < 0) + return ret; + + zxdh_datach_set(dev); + + if (zxdh_configure_intr(dev) < 0) { + PMD_INIT_LOG(ERR, "Failed to configure interrupt"); + zxdh_free_queues(dev); + return -1; + } + + zxdh_vtpci_reinit_complete(hw); + + return ret; +} + /* dev_ops for zxdh, bare necessities for basic operation */ static const struct eth_dev_ops zxdh_eth_dev_ops = { + .dev_configure = zxdh_dev_configure, .dev_infos_get = zxdh_dev_infos_get, }; diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h index 7c5f5940cb..d547785e2a 100644 --- a/drivers/net/zxdh/zxdh_ethdev.h +++ b/drivers/net/zxdh/zxdh_ethdev.h @@ -14,10 +14,8 @@ extern "C" { #include #include -#include "zxdh_queue.h" - /* ZXDH PCI vendor/device ID. */ -#define PCI_VENDOR_ID_ZTE 0x1cf2 +#define PCI_VENDOR_ID_ZTE 0x1cf2 #define ZXDH_E310_PF_DEVICEID 0x8061 #define ZXDH_E310_VF_DEVICEID 0x8062 @@ -30,9 +28,16 @@ extern "C" { #define ZXDH_RX_QUEUES_MAX 128U #define ZXDH_TX_QUEUES_MAX 128U +#define ZXDH_QUEUE_DEPTH 1024 +#define ZXDH_QUEUES_BASE 0 +#define ZXDH_TOTAL_QUEUES_NUM 4096 +#define ZXDH_QUEUES_NUM_MAX 256 +#define ZXDH_QUERES_SHARE_BASE (0x5000) #define ZXDH_NUM_BARS 2 +#define ZXDH_MBUF_BURST_SZ 64 + union VPORT { uint16_t vport; struct { @@ -44,6 +49,11 @@ union VPORT { }; }; +struct chnl_context { + uint16_t valid; + uint16_t ph_chno; +}; + struct zxdh_hw { struct rte_eth_dev *eth_dev; struct zxdh_pci_common_cfg *common_cfg; @@ -51,6 +61,7 @@ struct zxdh_hw { struct rte_intr_handle *risc_intr; struct rte_intr_handle *dtb_intr; struct virtqueue **vqs; + struct chnl_context channel_context[ZXDH_QUEUES_NUM_MAX]; union VPORT vport; uint64_t bar_addr[ZXDH_NUM_BARS]; @@ -64,6 +75,7 @@ struct zxdh_hw { uint16_t device_id; uint16_t port_id; uint16_t vfid; + uint16_t queue_num; uint8_t *isr; uint8_t weak_barriers; @@ -75,6 +87,8 @@ struct zxdh_hw { uint8_t phyport; uint8_t panel_id; uint8_t msg_chan_init; + uint8_t has_tx_offload; + uint8_t has_rx_offload; }; uint16_t vport_to_vfid(union VPORT v); diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c index c63b7eee44..b174e35325 100644 --- a/drivers/net/zxdh/zxdh_pci.c +++ b/drivers/net/zxdh/zxdh_pci.c @@ -115,6 +115,86 @@ static uint8_t zxdh_get_isr(struct zxdh_hw *hw) return rte_read8(hw->isr); } +static uint16_t zxdh_get_queue_num(struct zxdh_hw *hw, uint16_t queue_id) +{ + rte_write16(queue_id, &hw->common_cfg->queue_select); + return rte_read16(&hw->common_cfg->queue_size); +} + +static void zxdh_set_queue_num(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size) +{ + rte_write16(queue_id, &hw->common_cfg->queue_select); + rte_write16(vq_size, &hw->common_cfg->queue_size); +} + +static int32_t check_vq_phys_addr_ok(struct virtqueue *vq) +{ + if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> (ZXDH_PCI_QUEUE_ADDR_SHIFT + 32)) { + PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); + return 0; + } + return 1; +} + +static inline void io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi) +{ + rte_write32(val & ((1ULL << 32) - 1), lo); + rte_write32(val >> 32, hi); +} + +static int32_t zxdh_setup_queue(struct zxdh_hw *hw, struct virtqueue *vq) +{ + uint64_t desc_addr = 0; + uint64_t avail_addr = 0; + uint64_t used_addr = 0; + uint16_t notify_off = 0; + + if 
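+	/* the ring must sit below 1ULL << (ZXDH_PCI_QUEUE_ADDR_SHIFT + 32), i.e. 16 TB */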
(!check_vq_phys_addr_ok(vq)) + return -1; + + desc_addr = vq->vq_ring_mem; + avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); + if (vtpci_packed_queue(vq->hw)) { + used_addr = RTE_ALIGN_CEIL((avail_addr + sizeof(struct vring_packed_desc_event)), + ZXDH_PCI_VRING_ALIGN); + } else { + used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, + ring[vq->vq_nentries]), ZXDH_PCI_VRING_ALIGN); + } + + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + notify_off = rte_read16(&hw->common_cfg->queue_notify_off); /* default 0 */ + notify_off = 0; + vq->notify_addr = (void *)((uint8_t *)hw->notify_base + + notify_off * hw->notify_off_multiplier); + + rte_write16(1, &hw->common_cfg->queue_enable); + + return 0; +} + +static void zxdh_del_queue(struct zxdh_hw *hw, struct virtqueue *vq) +{ + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(0, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(0, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(0, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + rte_write16(0, &hw->common_cfg->queue_enable); +} + const struct zxdh_pci_ops zxdh_dev_pci_ops = { .read_dev_cfg = zxdh_read_dev_config, .write_dev_cfg = zxdh_write_dev_config, @@ -125,6 +205,10 @@ const struct zxdh_pci_ops zxdh_dev_pci_ops = { .set_queue_irq = zxdh_set_queue_irq, .set_config_irq = zxdh_set_config_irq, .get_isr = zxdh_get_isr, + .get_queue_num = zxdh_get_queue_num, + .set_queue_num = zxdh_set_queue_num, + .setup_queue = zxdh_setup_queue, + .del_queue = zxdh_del_queue, }; uint8_t zxdh_vtpci_isr(struct zxdh_hw *hw) @@ -151,6 +235,19 @@ void zxdh_vtpci_reset(struct zxdh_hw *hw) PMD_INIT_LOG(INFO, "port %u device reset %u ms done", hw->port_id, retry); } +void zxdh_vtpci_reinit_complete(struct zxdh_hw *hw) +{ + zxdh_vtpci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER_OK); +} + +void zxdh_vtpci_set_status(struct zxdh_hw *hw, uint8_t status) +{ + if (status != ZXDH_CONFIG_STATUS_RESET) + status |= VTPCI_OPS(hw)->get_status(hw); + + VTPCI_OPS(hw)->set_status(hw, status); +} + static void *get_cfg_addr(struct rte_pci_device *dev, struct zxdh_pci_cap *cap) { uint8_t bar = cap->bar; diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h index 677dadd5c8..bb6714ecc0 100644 --- a/drivers/net/zxdh/zxdh_pci.h +++ b/drivers/net/zxdh/zxdh_pci.h @@ -12,7 +12,9 @@ extern "C" { #include #include +#include #include +#include #include "zxdh_ethdev.h" @@ -34,8 +36,21 @@ enum zxdh_msix_status { #define PCI_CAP_ID_MSIX 0x11 #define PCI_MSIX_ENABLE 0x8000 +#define ZXDH_PCI_VRING_ALIGN 4096 + +#define ZXDH_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ +#define ZXDH_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define ZXDH_NET_F_MTU 3 /* Initial MTU advice. */ #define ZXDH_NET_F_MAC 5 /* Host has given MAC address. */ +#define ZXDH_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ +#define ZXDH_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ +#define ZXDH_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ +#define ZXDH_NET_F_GUEST_UFO 10 /* Guest can handle UFO in. 
*/ + +#define ZXDH_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ +#define ZXDH_NET_F_HOST_TSO4 11 /* Host can handle TSOv4 in. */ +#define ZXDH_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ #define ZXDH_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */ #define ZXDH_NET_F_STATUS 16 /* zxdh_net_config.status available */ #define ZXDH_NET_F_MQ 22 /* Device supports Receive Flow Steering */ @@ -60,6 +75,8 @@ enum zxdh_msix_status { #define ZXDH_CONFIG_STATUS_DEV_NEED_RESET 0x40 #define ZXDH_CONFIG_STATUS_FAILED 0x80 +#define ZXDH_PCI_QUEUE_ADDR_SHIFT 12 + struct zxdh_net_config { /* The config defining mac address (if ZXDH_NET_F_MAC) */ uint8_t mac[RTE_ETHER_ADDR_LEN]; @@ -122,6 +139,12 @@ static inline int32_t vtpci_with_feature(struct zxdh_hw *hw, uint64_t bit) return (hw->guest_features & (1ULL << bit)) != 0; } +static inline int32_t vtpci_packed_queue(struct zxdh_hw *hw) +{ + return vtpci_with_feature(hw, ZXDH_F_RING_PACKED); +} + + struct zxdh_pci_ops { void (*read_dev_cfg)(struct zxdh_hw *hw, size_t offset, void *dst, int32_t len); void (*write_dev_cfg)(struct zxdh_hw *hw, size_t offset, const void *src, int32_t len); @@ -134,6 +157,11 @@ struct zxdh_pci_ops { uint16_t (*set_queue_irq)(struct zxdh_hw *hw, struct virtqueue *vq, uint16_t vec); uint16_t (*set_config_irq)(struct zxdh_hw *hw, uint16_t vec); uint8_t (*get_isr)(struct zxdh_hw *hw); + uint16_t (*get_queue_num)(struct zxdh_hw *hw, uint16_t queue_id); + void (*set_queue_num)(struct zxdh_hw *hw, uint16_t queue_id, uint16_t vq_size); + + int32_t (*setup_queue)(struct zxdh_hw *hw, struct virtqueue *vq); + void (*del_queue)(struct zxdh_hw *hw, struct virtqueue *vq); }; struct zxdh_hw_internal { @@ -156,6 +184,9 @@ uint16_t zxdh_vtpci_get_features(struct zxdh_hw *hw); uint8_t zxdh_vtpci_isr(struct zxdh_hw *hw); enum zxdh_msix_status zxdh_vtpci_msix_detect(struct rte_pci_device *dev); +void zxdh_vtpci_reinit_complete(struct zxdh_hw *hw); +void zxdh_vtpci_set_status(struct zxdh_hw *hw, uint8_t status); + #ifdef __cplusplus } #endif diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c new file mode 100644 index 0000000000..005b2a5578 --- /dev/null +++ b/drivers/net/zxdh/zxdh_queue.c @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 ZTE Corporation + */ + +#include +#include +#include + +#include "zxdh_queue.h" +#include "zxdh_logs.h" +#include "zxdh_pci.h" +#include "zxdh_common.h" +#include "zxdh_msg.h" + +struct rte_mbuf *zxdh_virtqueue_detach_unused(struct virtqueue *vq) +{ + struct rte_mbuf *cookie = NULL; + int32_t idx = 0; + + if (vq == NULL) + return NULL; + + for (idx = 0; idx < vq->vq_nentries; idx++) { + cookie = vq->vq_descx[idx].cookie; + if (cookie != NULL) { + vq->vq_descx[idx].cookie = NULL; + return cookie; + } + } + return NULL; +} + +static int32_t zxdh_release_channel(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint16_t nr_vq = hw->queue_num; + uint32_t var = 0; + uint32_t addr = 0; + uint32_t widx = 0; + uint32_t bidx = 0; + uint16_t pch = 0; + uint16_t lch = 0; + uint16_t timeout = 0; + + while ((timeout++) < ZXDH_ACQUIRE_CHANNEL_NUM_MAX) { + if (zxdh_acquire_lock(hw) != 0) { + PMD_INIT_LOG(ERR, + "Could not acquire lock to release channel, timeout %d", timeout); + continue; + } + break; + } + + if (timeout >= ZXDH_ACQUIRE_CHANNEL_NUM_MAX) { + PMD_INIT_LOG(ERR, "Acquire lock timeout"); + return -1; + } + + for (lch = 0; lch < nr_vq; lch++) { + if (hw->channel_context[lch].valid == 0) { + PMD_INIT_LOG(DEBUG, "Logic 
channel %d does not need to release", lch); + continue; + } + + pch = hw->channel_context[lch].ph_chno; + widx = pch / 32; + bidx = pch % 32; + + addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t)); + var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr); + var &= ~(1 << bidx); + zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var); + + hw->channel_context[lch].valid = 0; + hw->channel_context[lch].ph_chno = 0; + } + + zxdh_release_lock(hw); + + return 0; +} + +int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx) +{ + if (vtpci_queue_idx % 2 == 0) + return VTNET_RQ; + else + return VTNET_TQ; +} + +int32_t zxdh_free_queues(struct rte_eth_dev *dev) +{ + struct zxdh_hw *hw = dev->data->dev_private; + uint16_t nr_vq = hw->queue_num; + struct virtqueue *vq = NULL; + int32_t queue_type = 0; + uint16_t i = 0; + + if (hw->vqs == NULL) + return 0; + + if (zxdh_release_channel(dev) < 0) { + PMD_INIT_LOG(ERR, "Failed to clear coi table"); + return -1; + } + + for (i = 0; i < nr_vq; i++) { + vq = hw->vqs[i]; + if (vq == NULL) + continue; + + VTPCI_OPS(hw)->del_queue(hw, vq); + queue_type = zxdh_get_queue_type(i); + if (queue_type == VTNET_RQ) { + rte_free(vq->sw_ring); + rte_memzone_free(vq->rxq.mz); + } else if (queue_type == VTNET_TQ) { + rte_memzone_free(vq->txq.mz); + rte_memzone_free(vq->txq.virtio_net_hdr_mz); + } + + rte_free(vq); + hw->vqs[i] = NULL; + PMD_INIT_LOG(DEBUG, "Release to queue %d success!", i); + } + + rte_free(hw->vqs); + hw->vqs = NULL; + + return 0; +} diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h index 336c0701f4..e0fa5502e8 100644 --- a/drivers/net/zxdh/zxdh_queue.h +++ b/drivers/net/zxdh/zxdh_queue.h @@ -15,6 +15,25 @@ extern "C" { #include "zxdh_ethdev.h" #include "zxdh_rxtx.h" +#include "zxdh_pci.h" + +enum { VTNET_RQ = 0, VTNET_TQ = 1 }; + +#define VIRTQUEUE_MAX_NAME_SZ 32 +#define RQ_QUEUE_IDX 0 +#define TQ_QUEUE_IDX 1 +#define ZXDH_MAX_TX_INDIRECT 8 + +/* This marks a buffer as write-only (otherwise read-only). */ +#define VRING_DESC_F_WRITE 2 +/* This flag means the descriptor was made available by the driver */ +#define VRING_PACKED_DESC_F_AVAIL (1 << (7)) + +#define RING_EVENT_FLAGS_ENABLE 0x0 +#define RING_EVENT_FLAGS_DISABLE 0x1 +#define RING_EVENT_FLAGS_DESC 0x2 + +#define VQ_RING_DESC_CHAIN_END 32768 /** ring descriptors: 16 bytes. * These can chain together via "next". @@ -26,6 +45,19 @@ struct vring_desc { uint16_t next; /* We chain unused descriptors via this. */ }; +struct vring_used_elem { + /* Index of start of used descriptor chain. */ + uint32_t id; + /* Total length of the descriptor chain which was written to. 
*/ + uint32_t len; +}; + +struct vring_used { + uint16_t flags; + uint16_t idx; + struct vring_used_elem ring[0]; +}; + struct vring_avail { uint16_t flags; uint16_t idx; @@ -102,4 +134,148 @@ struct virtqueue { struct vq_desc_extra vq_descx[0]; }; +struct zxdh_type_hdr { + uint8_t port; /* bit[0:1] 00-np 01-DRS 10-DTP */ + uint8_t pd_len; + uint8_t num_buffers; + uint8_t reserved; +} __rte_packed; /* 4B */ + +struct zxdh_pi_hdr { + uint8_t pi_len; + uint8_t pkt_type; + uint16_t vlan_id; + uint32_t ipv6_extend; + uint16_t l3_offset; + uint16_t l4_offset; + uint8_t phy_port; + uint8_t pkt_flag_hi8; + uint16_t pkt_flag_lw16; + union { + struct { + uint64_t sa_idx; + uint8_t reserved_8[8]; + } dl; + struct { + uint32_t lro_flag; + uint32_t lro_mss; + uint16_t err_code; + uint16_t pm_id; + uint16_t pkt_len; + uint8_t reserved[2]; + } ul; + }; +} __rte_packed; /* 32B */ + +struct zxdh_pd_hdr_dl { + uint32_t ol_flag; + uint8_t tag_idx; + uint8_t tag_data; + uint16_t dst_vfid; + uint32_t svlan_insert; + uint32_t cvlan_insert; +} __rte_packed; /* 16B */ + +struct zxdh_net_hdr_dl { + struct zxdh_type_hdr type_hdr; /* 4B */ + struct zxdh_pi_hdr pi_hdr; /* 32B */ + struct zxdh_pd_hdr_dl pd_hdr; /* 16B */ +} __rte_packed; + +struct zxdh_pd_hdr_ul { + uint32_t pkt_flag; + uint32_t rss_hash; + uint32_t fd; + uint32_t striped_vlan_tci; + /* ovs */ + uint8_t tag_idx; + uint8_t tag_data; + uint16_t src_vfid; + /* */ + uint16_t pkt_type_out; + uint16_t pkt_type_in; +} __rte_packed; /* 24B */ + +struct zxdh_net_hdr_ul { + struct zxdh_type_hdr type_hdr; /* 4B */ + struct zxdh_pi_hdr pi_hdr; /* 32B */ + struct zxdh_pd_hdr_ul pd_hdr; /* 24B */ +} __rte_packed; /* 60B */ + +struct zxdh_tx_region { + struct zxdh_net_hdr_dl tx_hdr; + union { + struct vring_desc tx_indir[ZXDH_MAX_TX_INDIRECT]; + struct vring_packed_desc tx_packed_indir[ZXDH_MAX_TX_INDIRECT]; + } __rte_packed; +}; + +static inline size_t vring_size(struct zxdh_hw *hw, uint32_t num, unsigned long align) +{ + size_t size; + + if (vtpci_packed_queue(hw)) { + size = num * sizeof(struct vring_packed_desc); + size += sizeof(struct vring_packed_desc_event); + size = RTE_ALIGN_CEIL(size, align); + size += sizeof(struct vring_packed_desc_event); + return size; + } + + size = num * sizeof(struct vring_desc); + size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)); + size = RTE_ALIGN_CEIL(size, align); + size += sizeof(struct vring_used) + (num * sizeof(struct vring_used_elem)); + return size; +} + +static inline void vring_init_packed(struct vring_packed *vr, uint8_t *p, + unsigned long align, uint32_t num) +{ + vr->num = num; + vr->desc = (struct vring_packed_desc *)p; + vr->driver = (struct vring_packed_desc_event *)(p + + vr->num * sizeof(struct vring_packed_desc)); + vr->device = (struct vring_packed_desc_event *)RTE_ALIGN_CEIL(((uintptr_t)vr->driver + + sizeof(struct vring_packed_desc_event)), align); +} + +static inline void vring_desc_init_packed(struct virtqueue *vq, int32_t n) +{ + int32_t i = 0; + + for (i = 0; i < n - 1; i++) { + vq->vq_packed.ring.desc[i].id = i; + vq->vq_descx[i].next = i + 1; + } + vq->vq_packed.ring.desc[i].id = i; + vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END; +} + +static inline void vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int32_t n) +{ + int32_t i = 0; + + for (i = 0; i < n; i++) { + dp[i].id = (uint16_t)i; + dp[i].flags = VRING_DESC_F_WRITE; + } +} + +static inline void virtqueue_disable_intr(struct virtqueue *vq) +{ + if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) { + 
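+		/* cache the flags so a repeated disable skips the device write */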
vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE; + vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow; + } +} + +struct rte_mbuf *zxdh_virtqueue_detach_unused(struct virtqueue *vq); +int32_t zxdh_free_queues(struct rte_eth_dev *dev); +int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx); + +#ifdef __cplusplus +} +#endif + #endif /* _ZXDH_QUEUE_H_ */ diff --git a/drivers/net/zxdh/zxdh_rxtx.h b/drivers/net/zxdh/zxdh_rxtx.h index 7314f76d2c..6476bc15e2 100644 --- a/drivers/net/zxdh/zxdh_rxtx.h +++ b/drivers/net/zxdh/zxdh_rxtx.h @@ -48,4 +48,8 @@ struct virtnet_tx { const struct rte_memzone *mz; /* mem zone to populate TX ring. */ }; +#ifdef __cplusplus +} +#endif + #endif /* _ZXDH_RXTX_H_ */ -- 2.27.0
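
For context, a minimal stand-alone sketch of the DATACH payload layout that zxdh_datach_set() sends through the common table: queue_num first, then one 16-bit physical channel number per logical queue. build_datach_payload() is a hypothetical helper written only for illustration; it is not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper (illustration only): lay the payload out the same
 * way zxdh_datach_set() does -- queue_num (2 bytes) followed by
 * pch1..pchn (2 bytes each).
 */
static uint16_t *build_datach_payload(const uint16_t *ph_chno,
				      uint16_t queue_num, uint16_t *buff_size)
{
	uint16_t *buff = calloc(queue_num + 1, sizeof(uint16_t));
	uint16_t i;

	if (buff == NULL)
		return NULL;
	*buff_size = (queue_num + 1) * sizeof(uint16_t);
	buff[0] = queue_num;			/* element count comes first */
	for (i = 0; i < queue_num; i++)
		buff[i + 1] = ph_chno[i];	/* then the physical channel IDs */
	return buff;
}

int main(void)
{
	/* example: two RX/TX pairs on even/odd physical channels */
	const uint16_t pch[4] = { 2, 3, 34, 35 };
	uint16_t size;
	uint16_t *payload = build_datach_payload(pch, 4, &size);

	if (payload == NULL)
		return 1;
	printf("payload: %u bytes, queue_num=%u\n", size, payload[0]);
	free(payload);
	return 0;
}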