add support for modifying queue depth; the minimum depth is 512 and the
maximum depth is 32768.

Signed-off-by: Junlong Wang
---
 drivers/net/zxdh/zxdh_ethdev.c | 161 +++++++++++++++++++++------------
 drivers/net/zxdh/zxdh_ethdev.h |  15 +++
 drivers/net/zxdh/zxdh_queue.c  | 137 +++++++++++++++++++++++-----
 drivers/net/zxdh/zxdh_queue.h  |  11 ++-
 4 files changed, 240 insertions(+), 84 deletions(-)

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index bc929bacc5..80b992d4ad 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -641,7 +641,8 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
 	uint32_t vq_size = 0;
 	int32_t ret = 0;
 
-	if (hw->channel_context[vtpci_logic_qidx].valid == 0) {
+	if (vtpci_logic_qidx >= ZXDH_QUEUES_NUM_MAX ||
+			hw->channel_context[vtpci_logic_qidx].valid == 0) {
 		PMD_DRV_LOG(ERR, "lch %d is invalid", vtpci_logic_qidx);
 		return -EINVAL;
 	}
@@ -650,7 +651,10 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
 	PMD_DRV_LOG(DEBUG, "vtpci_logic_qidx :%d setting up physical queue: %u on NUMA node %d",
 			vtpci_logic_qidx, vtpci_phy_qidx, numa_node);
 
-	vq_size = ZXDH_QUEUE_DEPTH;
+	if (queue_type == ZXDH_VTNET_RQ)
+		vq_size = hw->queue_conf->conf[vtpci_logic_qidx >> 1].rx_nb_desc;
+	else
+		vq_size = hw->queue_conf->conf[vtpci_logic_qidx >> 1].tx_nb_desc;
 
 	if (ZXDH_VTPCI_OPS(hw)->set_queue_num != NULL)
 		ZXDH_VTPCI_OPS(hw)->set_queue_num(hw, vtpci_phy_qidx, vq_size);
@@ -980,12 +984,6 @@ zxdh_dev_conf_offload(struct rte_eth_dev *dev)
 		return ret;
 	}
 
-	ret = zxdh_rss_configure(dev);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "rss configure failed");
-		return ret;
-	}
-
 	ret = zxdh_rx_csum_lro_offload_configure(dev);
 	if (ret) {
 		PMD_DRV_LOG(ERR, "rx csum lro configure failed");
@@ -1081,52 +1079,6 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
 	hw->has_tx_offload = zxdh_tx_offload_enabled(hw);
 	hw->has_rx_offload = zxdh_rx_offload_enabled(hw);
 
-	if (dev->data->nb_rx_queues == hw->rx_qnum &&
-			dev->data->nb_tx_queues == hw->tx_qnum) {
-		PMD_DRV_LOG(DEBUG, "The queue not need to change. queue_rx %d queue_tx %d",
-				hw->rx_qnum, hw->tx_qnum);
-		/*no queue changed */
-		goto end;
-	}
-
-	PMD_DRV_LOG(DEBUG, "queue changed need reset");
-	/* Reset the device although not necessary at startup */
-	zxdh_pci_reset(hw);
-
-	/* Tell the host we've noticed this device. */
-	zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
-
-	/* Tell the host we've known how to drive the device. */
-	zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER);
-	/* The queue needs to be released when reconfiguring*/
-	if (hw->vqs != NULL) {
-		zxdh_dev_free_mbufs(dev);
-		zxdh_free_queues(dev);
-	}
-
-	hw->rx_qnum = dev->data->nb_rx_queues;
-	hw->tx_qnum = dev->data->nb_tx_queues;
-	ret = zxdh_alloc_queues(dev);
-	if (ret < 0)
-		return ret;
-
-	zxdh_datach_set(dev);
-
-	if (zxdh_configure_intr(dev) < 0) {
-		PMD_DRV_LOG(ERR, "Failed to configure interrupt");
-		zxdh_free_queues(dev);
-		return -1;
-	}
-
-	ret = zxdh_rss_qid_config(dev);
-	if (ret) {
-		PMD_DRV_LOG(ERR, "Failed to configure base qid!");
-		return -1;
-	}
-
-	zxdh_pci_reinit_complete(hw);
-
-end:
 	zxdh_dev_conf_offload(dev);
 	zxdh_update_net_hdr_dl(hw);
 	return ret;
@@ -1264,6 +1216,9 @@ zxdh_priv_res_free(struct zxdh_hw *priv)
 
 	rte_free(priv->channel_context);
 	priv->channel_context = NULL;
+
+	rte_free(priv->queue_conf);
+	priv->queue_conf = NULL;
 }
 
 static int
@@ -1354,6 +1309,88 @@ zxdh_mac_config(struct rte_eth_dev *eth_dev)
 	return ret;
 }
 
+static int32_t zxdh_reconfig_queues(struct rte_eth_dev *dev)
+{
+	int32_t ret;
+	struct zxdh_hw *hw = dev->data->dev_private;
+
+	zxdh_pci_reset(hw);
+
+	/* Tell the host we've noticed this device. */
+	zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_ACK);
+
+	/* Tell the host we've known how to drive the device. */
+	zxdh_pci_set_status(hw, ZXDH_CONFIG_STATUS_DRIVER);
+	/* The queue needs to be released when reconfiguring */
+	if (hw->vqs != NULL) {
+		zxdh_dev_free_mbufs(dev);
+		zxdh_free_queues(dev);
+	}
+
+	hw->rx_qnum = dev->data->nb_rx_queues;
+	hw->tx_qnum = dev->data->nb_tx_queues;
+	ret = zxdh_alloc_queues(dev);
+	if (ret < 0)
+		return ret;
+
+	zxdh_datach_set(dev);
+
+	if (zxdh_configure_intr(dev) < 0) {
+		PMD_DRV_LOG(ERR, "Failed to configure interrupt");
+		zxdh_free_queues(dev);
+		return -1;
+	}
+
+	zxdh_pci_reinit_complete(hw);
+	return 0;
+}
+
+static int32_t zxdh_config_queue(struct rte_eth_dev *dev)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	int32_t ret = 0, i = 0;
+
+	if (hw->queue_conf->queue_changed) {
+		ret = zxdh_reconfig_queues(dev);
+		if (ret)
+			return ret;
+
+		ret = zxdh_rss_qid_config(dev);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to configure base qid!");
+			return -1;
+		}
+
+		ret = zxdh_rss_configure(dev);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to config rss");
+			return -1;
+		}
+		hw->queue_conf->queue_changed = 0;
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		ret = zxdh_tx_queue_config(dev, i);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to config tx queue");
+			return ret;
+		}
+	}
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		ret = zxdh_rx_queue_config(dev, i);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "Failed to config rx queue");
+			return ret;
+		}
+		ret = zxdh_dev_rx_queue_setup_finish(dev, i);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int
 zxdh_dev_start(struct rte_eth_dev *dev)
 {
@@ -1363,12 +1400,9 @@
 	uint16_t logic_qidx;
 	uint16_t i;
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		logic_qidx = 2 * i + ZXDH_RQ_QUEUE_IDX;
-		ret = zxdh_dev_rx_queue_setup_finish(dev, logic_qidx);
-		if (ret < 0)
-			return ret;
-	}
+	ret = zxdh_config_queue(dev);
+	if (ret)
+		return ret;
 
 	zxdh_set_rxtx_funcs(dev);
 	ret = zxdh_intr_enable(dev);
@@ -2081,6 +2115,13 @@ zxdh_priv_res_init(struct zxdh_hw *hw)
 		PMD_DRV_LOG(ERR, "Failed to allocate channel_context");
 		return -ENOMEM;
 	}
+
+	hw->queue_conf = rte_zmalloc("zxdh_queue_conf", sizeof(struct zxdh_queue_conf), 0);
+	if (hw->queue_conf == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to allocate queue conf");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index a269199540..411d287f32 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -94,6 +94,20 @@ struct vfinfo {
 	struct rte_ether_addr vf_mac[ZXDH_MAX_MAC_ADDRS];
 };
 
+struct queue_conf {
+	struct rte_mempool *queue_mp;
+	struct rte_eth_rxconf zxdh_rx_conf;
+	struct rte_eth_txconf zxdh_tx_conf;
+	uint16_t rx_nb_desc;
+	uint16_t tx_nb_desc;
+};
+
+struct zxdh_queue_conf {
+	struct queue_conf conf[ZXDH_QUEUES_NUM_MAX / 2];
+	uint16_t queue_changed;
+	uint16_t rsv;
+};
+
 struct zxdh_hw {
 	struct rte_eth_dev *eth_dev;
 	struct zxdh_pci_common_cfg *common_cfg;
@@ -105,6 +119,7 @@ struct zxdh_hw {
 	struct zxdh_dev_shared_data *dev_sd;
 	struct zxdh_dev_nic_shared_data *dev_nic_sd;
 	struct vfinfo *vfinfo;
+	struct zxdh_queue_conf *queue_conf;
 
 	uint64_t bar_addr[ZXDH_NUM_BARS];
 	uint64_t host_features;
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index 9266756d79..4eb8ef782c 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -184,6 +184,45 @@ zxdh_check_mempool(struct rte_mempool *mp, uint16_t offset, uint16_t min_length)
 	return 0;
 }
 
+static unsigned int
+log2above(unsigned int v)
+{
+	unsigned int l;
+	unsigned int r;
+
+	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
+		r |= (v & 1);
+	return l + r;
+}
+
+static uint16_t zxdh_queue_desc_pre_setup(uint16_t desc)
+{
+	uint32_t nb_desc = desc;
+
+	if (desc < ZXDH_MIN_QUEUE_DEPTH) {
+		PMD_DRV_LOG(WARNING, "nb_desc(%u) < min queue depth (%u), turn to min queue depth",
+				desc, ZXDH_MIN_QUEUE_DEPTH);
+		return ZXDH_MIN_QUEUE_DEPTH;
+	}
+
+	if (desc > ZXDH_MAX_QUEUE_DEPTH) {
+		PMD_DRV_LOG(WARNING, "nb_desc(%u) > max queue depth (%d), turn to max queue depth",
+				desc, ZXDH_MAX_QUEUE_DEPTH);
+		return ZXDH_MAX_QUEUE_DEPTH;
+	}
+
+	if (!rte_is_power_of_2(desc)) {
+		nb_desc = 1 << log2above(desc);
+		if (nb_desc > ZXDH_MAX_QUEUE_DEPTH)
+			nb_desc = ZXDH_MAX_QUEUE_DEPTH;
+		PMD_DRV_LOG(WARNING, "nb_desc(%u) turn to the next power of two (%u)",
+				desc, nb_desc);
+	}
+
+	return nb_desc;
+}
+
+
 int32_t
 zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -191,37 +230,66 @@ zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint32_t socket_id __rte_unused,
 			const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t valid_nb_desc = 0;
+
+	if (rx_conf->rx_deferred_start) {
+		PMD_RX_LOG(ERR, "Rx deferred start is not supported");
+		return -EINVAL;
+	}
+
+	valid_nb_desc = zxdh_queue_desc_pre_setup(nb_desc);
+	if (dev->data->nb_rx_queues != hw->rx_qnum ||
+			valid_nb_desc != hw->queue_conf->conf[queue_idx].rx_nb_desc) {
+		PMD_RX_LOG(DEBUG, "rx queue changed. rxq:[%d], hw->rxq:[%d], nb_desc:%d, hw->nb_desc:%d",
+				dev->data->nb_rx_queues, hw->rx_qnum, valid_nb_desc,
+				hw->queue_conf->conf[queue_idx].rx_nb_desc);
+		hw->queue_conf->queue_changed = 1;
+	}
+
+	rte_memcpy(&hw->queue_conf->conf[queue_idx].zxdh_rx_conf,
+			rx_conf, sizeof(struct rte_eth_rxconf));
+	hw->queue_conf->conf[queue_idx].rx_nb_desc = valid_nb_desc;
+	hw->queue_conf->conf[queue_idx].queue_mp = mp;
+
+	return 0;
+}
+
+int32_t
+zxdh_rx_queue_config(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
 	struct zxdh_hw *hw = dev->data->dev_private;
 	uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_RQ_QUEUE_IDX;
 	struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
 	int32_t ret = 0;
 
-	if (rx_conf->rx_deferred_start) {
-		PMD_RX_LOG(ERR, "Rx deferred start is not supported");
+	if (!hw->queue_conf) {
+		PMD_RX_LOG(ERR, "rx queue config failed, queue_conf is NULL, queue_idx:%d",
+				queue_idx);
 		return -EINVAL;
 	}
+
+	struct rte_eth_rxconf *rx_conf = &hw->queue_conf->conf[queue_idx].zxdh_rx_conf;
 	uint16_t rx_free_thresh = rx_conf->rx_free_thresh;
 
 	if (rx_free_thresh == 0)
 		rx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_RX_FREE_THRESH);
 
-	/* rx_free_thresh must be multiples of four. */
 	if (rx_free_thresh & 0x3) {
 		PMD_RX_LOG(ERR, "(rx_free_thresh=%u port=%u queue=%u)",
 			rx_free_thresh, dev->data->port_id, queue_idx);
 		return -EINVAL;
 	}
 
-	/* rx_free_thresh must be less than the number of RX entries */
 	if (rx_free_thresh >= vq->vq_nentries) {
 		PMD_RX_LOG(ERR, "RX entries (%u). (rx_free_thresh=%u port=%u queue=%u)",
 			vq->vq_nentries, rx_free_thresh, dev->data->port_id, queue_idx);
 		return -EINVAL;
 	}
+
 	vq->vq_free_thresh = rx_free_thresh;
 
-	nb_desc = ZXDH_QUEUE_DEPTH;
-	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, hw->queue_conf->conf[queue_idx].rx_nb_desc);
 
 	struct zxdh_virtnet_rx *rxvq = &vq->rxq;
 	rxvq->queue_id = vtpci_logic_qidx;
@@ -231,6 +299,7 @@ zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
 		mbuf_min_size = ZXDH_MBUF_SIZE_4K;
 
+	struct rte_mempool *mp = hw->queue_conf->conf[queue_idx].queue_mp;
 	ret = zxdh_check_mempool(mp, RTE_PKTMBUF_HEADROOM, mbuf_min_size);
 	if (ret != 0) {
 		PMD_RX_LOG(ERR,
@@ -238,6 +307,7 @@ zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 	rxvq->mpool = mp;
+
 	if (queue_idx < dev->data->nb_rx_queues)
 		dev->data->rx_queues[queue_idx] = rxvq;
 
@@ -251,21 +321,48 @@ zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			uint32_t socket_id __rte_unused,
 			const struct rte_eth_txconf *tx_conf)
 {
-	uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_TQ_QUEUE_IDX;
 	struct zxdh_hw *hw = dev->data->dev_private;
-	struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
-	struct zxdh_virtnet_tx *txvq = NULL;
-	uint16_t tx_free_thresh = 0;
+	uint16_t valid_nb_desc = 0;
 
 	if (tx_conf->tx_deferred_start) {
 		PMD_TX_LOG(ERR, "Tx deferred start is not supported");
 		return -EINVAL;
 	}
 
-	nb_desc = ZXDH_QUEUE_DEPTH;
+	valid_nb_desc = zxdh_queue_desc_pre_setup(nb_desc);
+	if (dev->data->nb_tx_queues != hw->tx_qnum ||
+			valid_nb_desc != hw->queue_conf->conf[queue_idx].tx_nb_desc) {
+		PMD_TX_LOG(DEBUG, "tx queue changed. txq:[%d], hw->txq:[%d], nb_desc:%d, hw->nb_desc:%d",
+				dev->data->nb_tx_queues, hw->tx_qnum, valid_nb_desc,
+				hw->queue_conf->conf[queue_idx].tx_nb_desc);
+		hw->queue_conf->queue_changed = 1;
+	}
+
+	rte_memcpy(&hw->queue_conf->conf[queue_idx].zxdh_tx_conf,
+			tx_conf, sizeof(struct rte_eth_txconf));
+	hw->queue_conf->conf[queue_idx].tx_nb_desc = valid_nb_desc;
+
+	return 0;
+}
+
+int32_t
+zxdh_tx_queue_config(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	struct zxdh_hw *hw = dev->data->dev_private;
+	struct zxdh_virtnet_tx *txvq = NULL;
+	uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_TQ_QUEUE_IDX;
+	struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
+	uint16_t tx_free_thresh = 0;
+
+	if (!hw->queue_conf) {
+		PMD_TX_LOG(ERR, "tx queue config failed, queue_conf is NULL, queue_idx:%d",
+				queue_idx);
+		return -EINVAL;
+	}
 
-	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+	struct rte_eth_txconf *tx_conf = &hw->queue_conf->conf[queue_idx].zxdh_tx_conf;
+	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, hw->queue_conf->conf[queue_idx].tx_nb_desc);
 
 	txvq = &vq->txq;
 	txvq->queue_id = vtpci_logic_qidx;
@@ -273,15 +370,14 @@ zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	if (tx_free_thresh == 0)
 		tx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_TX_FREE_THRESH);
 
-	/* tx_free_thresh must be less than the number of TX entries minus 3 */
 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
-		PMD_TX_LOG(ERR, "TX entries - 3 (%u). (tx_free_thresh=%u port=%u queue=%u)",
-			vq->vq_nentries - 3, tx_free_thresh, dev->data->port_id, queue_idx);
+		PMD_TX_LOG(ERR, "tx_free_thresh must be less than the number of TX entries minus 3 (%u). (tx_free_thresh=%u port=%u queue=%u)",
+			vq->vq_nentries - 3,
+			tx_free_thresh, dev->data->port_id, queue_idx);
 		return -EINVAL;
 	}
 
 	vq->vq_free_thresh = tx_free_thresh;
-
 	if (queue_idx < dev->data->nb_tx_queues)
 		dev->data->tx_queues[queue_idx] = txvq;
 
@@ -337,19 +433,14 @@ int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,
 	return 0;
 }
 
-int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t logic_qidx)
+int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
 	struct zxdh_hw *hw = dev->data->dev_private;
+	uint16_t logic_qidx = ((queue_idx << 1) + ZXDH_RQ_QUEUE_IDX) % ZXDH_QUEUES_NUM_MAX;
 	struct zxdh_virtqueue *vq = hw->vqs[logic_qidx];
 	struct zxdh_virtnet_rx *rxvq = &vq->rxq;
-	uint16_t desc_idx;
 	int32_t error = 0;
 
-	/* Allocate blank mbufs for the each rx descriptor */
-	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
-	for (desc_idx = 0; desc_idx < ZXDH_MBUF_BURST_SZ; desc_idx++)
-		vq->sw_ring[vq->vq_nentries + desc_idx] = &rxvq->fake_mbuf;
-
 	while (!zxdh_queue_full(vq)) {
 		struct rte_mbuf *new_pkts[ZXDH_MBUF_BURST_SZ];
 		uint16_t free_cnt = RTE_MIN(ZXDH_MBUF_BURST_SZ, vq->vq_free_cnt);
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 1a54e7cfc9..3c89687d45 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -21,6 +21,14 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_TQ_QUEUE_IDX         1
 #define ZXDH_MAX_TX_INDIRECT      8
 
+#define ZXDH_MIN_QUEUE_DEPTH      512
+#define ZXDH_MAX_QUEUE_DEPTH      32768
+
+#define ZXDH_TX_FREE_THRESH       32
+#define ZXDH_RX_FREE_THRESH       32
+
+#define ZXDH_MBUF_SIZE_4K         4096
+
 /* This marks a buffer as continuing via the next field. */
 #define ZXDH_VRING_DESC_F_NEXT    1
 
@@ -424,5 +432,6 @@ int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t logic_q
 void zxdh_queue_rxvq_flush(struct zxdh_virtqueue *vq);
 int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,
 		struct rte_mbuf **cookie, uint16_t num);
-
+int32_t zxdh_tx_queue_config(struct rte_eth_dev *dev, uint16_t queue_idx);
+int32_t zxdh_rx_queue_config(struct rte_eth_dev *dev, uint16_t queue_idx);
 #endif /* ZXDH_QUEUE_H */
-- 
2.27.0
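For readers who want to check the depth normalization without a device, the
behaviour introduced by zxdh_queue_desc_pre_setup() can be exercised in
isolation. The sketch below mirrors that function and the patch's log2above()
helper; is_power_of_2() and the main() driver are stand-ins added here only so
it builds without DPDK (the driver itself uses rte_is_power_of_2() and PMD
logging instead).

#include <stdint.h>
#include <stdio.h>

#define ZXDH_MIN_QUEUE_DEPTH 512
#define ZXDH_MAX_QUEUE_DEPTH 32768

/* Same bit-walk as the patch's log2above(): l ends up as floor(log2(v)),
 * and r becomes 1 if any bit below the top one is set, so the sum is
 * ceil(log2(v)) for v > 1. */
static unsigned int log2above(unsigned int v)
{
	unsigned int l, r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

/* Stand-in for rte_is_power_of_2() so the demo builds without DPDK. */
static int is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Mirrors zxdh_queue_desc_pre_setup(): clamp the request to the
 * [ZXDH_MIN_QUEUE_DEPTH, ZXDH_MAX_QUEUE_DEPTH] window, then round a
 * non-power-of-two count up to the next power of two. */
static uint16_t queue_desc_pre_setup(uint16_t desc)
{
	uint32_t nb_desc = desc;

	if (desc < ZXDH_MIN_QUEUE_DEPTH)
		return ZXDH_MIN_QUEUE_DEPTH;
	if (desc > ZXDH_MAX_QUEUE_DEPTH)
		return ZXDH_MAX_QUEUE_DEPTH;
	if (!is_power_of_2(desc)) {
		nb_desc = 1u << log2above(desc);
		if (nb_desc > ZXDH_MAX_QUEUE_DEPTH)
			nb_desc = ZXDH_MAX_QUEUE_DEPTH;
	}
	return (uint16_t)nb_desc;
}

int main(void)
{
	/* 100 -> 512 (below min), 600 -> 1024 (next power of two),
	 * 4096 -> 4096 (already a power of two), 40000 -> 32768 (capped) */
	const uint16_t samples[] = {100, 600, 4096, 40000};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%u -> %u\n", samples[i], queue_desc_pre_setup(samples[i]));
	return 0;
}

Rounding up rather than down keeps at least the capacity the application
requested, and because the request is clamped to [512, 32768] first, the
rounded result can never leave the supported range.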