Tested-by: Rushil Gupta

On Mon, May 8, 2023 at 8:07 PM Junfeng Guo wrote:
> Add support for queue operations for GQI:
> - gve_rx_queue_start
> - gve_tx_queue_start
> - gve_rx_queue_stop
> - gve_tx_queue_stop
>
> Add support for queue operations for DQO:
> - gve_rx_queue_start_dqo
> - gve_tx_queue_start_dqo
> - gve_rx_queue_stop_dqo
> - gve_tx_queue_stop_dqo
>
> Also move the rxq_mbufs_alloc functions into the corresponding files.
>
> Signed-off-by: Junfeng Guo
> ---
>  drivers/net/gve/gve_ethdev.c | 166 +++++++++++------------------------
>  drivers/net/gve/gve_ethdev.h |  36 ++++++++
>  drivers/net/gve/gve_rx.c     |  96 ++++++++++++++++++--
>  drivers/net/gve/gve_rx_dqo.c |  97 ++++++++++++++++++--
>  drivers/net/gve/gve_tx.c     |  54 ++++++++++--
>  drivers/net/gve/gve_tx_dqo.c |  54 ++++++++++--
>  6 files changed, 364 insertions(+), 139 deletions(-)
>
> diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
> index 8b6861a24f..1dcb3b3a01 100644
> --- a/drivers/net/gve/gve_ethdev.c
> +++ b/drivers/net/gve/gve_ethdev.c
> @@ -104,81 +104,6 @@ gve_dev_configure(struct rte_eth_dev *dev)
>  	return 0;
>  }
>
> -static int
> -gve_refill_pages(struct gve_rx_queue *rxq)
> -{
> -	struct rte_mbuf *nmb;
> -	uint16_t i;
> -	int diag;
> -
> -	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
> -	if (diag < 0) {
> -		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
> -			nmb = rte_pktmbuf_alloc(rxq->mpool);
> -			if (!nmb)
> -				break;
> -			rxq->sw_ring[i] = nmb;
> -		}
> -		if (i < rxq->nb_rx_desc - 1)
> -			return -ENOMEM;
> -	}
> -	rxq->nb_avail = 0;
> -	rxq->next_avail = rxq->nb_rx_desc - 1;
> -
> -	for (i = 0; i < rxq->nb_rx_desc; i++) {
> -		if (rxq->is_gqi_qpl) {
> -			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
> -		} else {
> -			if (i == rxq->nb_rx_desc - 1)
> -				break;
> -			nmb = rxq->sw_ring[i];
> -			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
> -		}
> -	}
> -
> -	rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);
> -
> -	return 0;
> -}
> -
> -static int
> -gve_refill_dqo(struct gve_rx_queue *rxq)
> -{
> -	struct rte_mbuf *nmb;
> -	uint16_t i;
> -	int diag;
> -
> -	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
> -	if (diag < 0) {
> -		rxq->stats.no_mbufs_bulk++;
> -		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
> -			nmb = rte_pktmbuf_alloc(rxq->mpool);
> -			if (!nmb)
> -				break;
> -			rxq->sw_ring[i] = nmb;
> -		}
> -		if (i < rxq->nb_rx_desc - 1) {
> -			rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i;
> -			return -ENOMEM;
> -		}
> -	}
> -
> -	for (i = 0; i < rxq->nb_rx_desc; i++) {
> -		if (i == rxq->nb_rx_desc - 1)
> -			break;
> -		nmb = rxq->sw_ring[i];
> -		rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
> -		rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i);
> -	}
> -
> -	rxq->nb_rx_hold = 0;
> -	rxq->bufq_tail = rxq->nb_rx_desc - 1;
> -
> -	rte_write32(rxq->bufq_tail, rxq->qrx_tail);
> -
> -	return 0;
> -}
> -
>  static int
>  gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
>  {
> @@ -208,65 +133,68 @@ gve_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
>  }
>
>  static int
> -gve_dev_start(struct rte_eth_dev *dev)
> +gve_start_queues(struct rte_eth_dev *dev)
>  {
> -	uint16_t num_queues = dev->data->nb_tx_queues;
>  	struct gve_priv *priv = dev->data->dev_private;
> -	struct gve_tx_queue *txq;
> -	struct gve_rx_queue *rxq;
> +	uint16_t num_queues;
>  	uint16_t i;
> -	int err;
> +	int ret;
>
> +	num_queues = dev->data->nb_tx_queues;
>  	priv->txqs = (struct gve_tx_queue **)dev->data->tx_queues;
> -	err = gve_adminq_create_tx_queues(priv, num_queues);
> -	if (err) {
> -		PMD_DRV_LOG(ERR, "failed to create %u tx queues.", num_queues);
> -		return err;
> -	}
> -	for (i = 0; i < num_queues; i++) {
> -		txq = priv->txqs[i];
> -		txq->qtx_tail =
> -		&priv->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
> -		txq->qtx_head =
> -		&priv->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];
> -
> -		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
> -	}
> +	ret = gve_adminq_create_tx_queues(priv, num_queues);
> +	if (ret != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to create %u tx queues.", num_queues);
> +		return ret;
> +	}
> +	for (i = 0; i < num_queues; i++)
> +		if (gve_tx_queue_start(dev, i) != 0) {
> +			PMD_DRV_LOG(ERR, "Fail to start Tx queue %d", i);
> +			goto err_tx;
> +		}
>
>  	num_queues = dev->data->nb_rx_queues;
>  	priv->rxqs = (struct gve_rx_queue **)dev->data->rx_queues;
> -	err = gve_adminq_create_rx_queues(priv, num_queues);
> -	if (err) {
> -		PMD_DRV_LOG(ERR, "failed to create %u rx queues.", num_queues);
> +	ret = gve_adminq_create_rx_queues(priv, num_queues);
> +	if (ret != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to create %u rx queues.", num_queues);
>  		goto err_tx;
>  	}
>  	for (i = 0; i < num_queues; i++) {
> -		rxq = priv->rxqs[i];
> -		rxq->qrx_tail =
> -		&priv->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];
> -
> -		rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);
> -
>  		if (gve_is_gqi(priv))
> -			err = gve_refill_pages(rxq);
> +			ret = gve_rx_queue_start(dev, i);
>  		else
> -			err = gve_refill_dqo(rxq);
> -		if (err) {
> -			PMD_DRV_LOG(ERR, "Failed to refill for RX");
> +			ret = gve_rx_queue_start_dqo(dev, i);
> +		if (ret != 0) {
> +			PMD_DRV_LOG(ERR, "Fail to start Rx queue %d", i);
>  			goto err_rx;
>  		}
>  	}
>
> -	dev->data->dev_started = 1;
> -	gve_link_update(dev, 0);
> -
>  	return 0;
>
>  err_rx:
>  	gve_stop_rx_queues(dev);
>  err_tx:
>  	gve_stop_tx_queues(dev);
> -	return err;
> +	return ret;
> +}
> +
> +static int
> +gve_dev_start(struct rte_eth_dev *dev)
> +{
> +	int ret;
> +
> +	ret = gve_start_queues(dev);
> +	if (ret != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to start queues");
> +		return ret;
> +	}
> +
> +	dev->data->dev_started = 1;
> +	gve_link_update(dev, 0);
> +
> +	return 0;
>  }
>
>  static int
> @@ -573,6 +501,10 @@ static const struct eth_dev_ops gve_eth_dev_ops = {
>  	.tx_queue_setup = gve_tx_queue_setup,
>  	.rx_queue_release = gve_rx_queue_release,
>  	.tx_queue_release = gve_tx_queue_release,
> +	.rx_queue_start = gve_rx_queue_start,
> +	.tx_queue_start = gve_tx_queue_start,
> +	.rx_queue_stop = gve_rx_queue_stop,
> +	.tx_queue_stop = gve_tx_queue_stop,
>  	.link_update = gve_link_update,
>  	.stats_get = gve_dev_stats_get,
>  	.stats_reset = gve_dev_stats_reset,
> @@ -591,6 +523,10 @@ static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
>  	.tx_queue_setup = gve_tx_queue_setup_dqo,
>  	.rx_queue_release = gve_rx_queue_release_dqo,
>  	.tx_queue_release = gve_tx_queue_release_dqo,
> +	.rx_queue_start = gve_rx_queue_start_dqo,
> +	.tx_queue_start = gve_tx_queue_start_dqo,
> +	.rx_queue_stop = gve_rx_queue_stop_dqo,
> +	.tx_queue_stop = gve_tx_queue_stop_dqo,
>  	.link_update = gve_link_update,
>  	.stats_get = gve_dev_stats_get,
>  	.stats_reset = gve_dev_stats_reset,
> @@ -877,12 +813,12 @@ gve_dev_init(struct rte_eth_dev *eth_dev)
>
>  	if (gve_is_gqi(priv)) {
>  		eth_dev->dev_ops = &gve_eth_dev_ops;
> -		eth_dev->rx_pkt_burst = gve_rx_burst;
> -		eth_dev->tx_pkt_burst = gve_tx_burst;
> +		gve_set_rx_function(eth_dev);
> +		gve_set_tx_function(eth_dev);
>  	} else {
>  		eth_dev->dev_ops = &gve_eth_dev_ops_dqo;
> -		eth_dev->rx_pkt_burst = gve_rx_burst_dqo;
> -		eth_dev->tx_pkt_burst = gve_tx_burst_dqo;
> +		gve_set_rx_function_dqo(eth_dev);
> +		gve_set_tx_function_dqo(eth_dev);
>  	}
>
>  	eth_dev->data->mac_addrs = &priv->dev_addr;
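With these callbacks wired into the two eth_dev_ops tables, per-queue
restarts become reachable through the generic ethdev API. A minimal sketch
of how an application could exercise them, assuming port 0 is already
configured and started (the port and queue ids are illustrative):

#include <rte_ethdev.h>

/* Restart one Rx/Tx queue pair in place. The ethdev layer dispatches to
 * gve_{rx,tx}_queue_stop()/start() (or the _dqo variants); the PMD
 * callbacks update dev->data->rx_queue_state[]/tx_queue_state[] so the
 * ethdev layer can short-circuit redundant requests.
 */
static int
restart_queue_pair(uint16_t port_id, uint16_t qid)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, qid);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_stop(port_id, qid);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_tx_queue_start(port_id, qid);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_queue_start(port_id, qid);
}
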
> diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
> index 53a75044c5..cd62debd22 100644
> --- a/drivers/net/gve/gve_ethdev.h
> +++ b/drivers/net/gve/gve_ethdev.h
> @@ -367,6 +367,18 @@ gve_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
>  void
>  gve_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
>
> +int
> +gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> +
> +int
> +gve_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> +
> +int
> +gve_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> +
> +int
> +gve_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> +
>  void
>  gve_stop_tx_queues(struct rte_eth_dev *dev);
>
> @@ -379,6 +391,12 @@ gve_rx_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
>  uint16_t
>  gve_tx_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
>
> +void
> +gve_set_rx_function(struct rte_eth_dev *dev);
> +
> +void
> +gve_set_tx_function(struct rte_eth_dev *dev);
> +
>  /* Below functions are used for DQO */
>
>  int
> @@ -397,6 +415,18 @@ gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
>  void
>  gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
>
> +int
> +gve_rx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> +
> +int
> +gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> +
> +int
> +gve_rx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id);
> +
> +int
> +gve_tx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id);
> +
>  void
>  gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
>
> @@ -409,4 +439,10 @@ gve_rx_burst_dqo(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
>  uint16_t
>  gve_tx_burst_dqo(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
>
> +void
> +gve_set_rx_function_dqo(struct rte_eth_dev *dev);
> +
> +void
> +gve_set_tx_function_dqo(struct rte_eth_dev *dev);
> +
>  #endif /* _GVE_ETHDEV_H_ */
> diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
> index f2f6202404..b8c92ccda0 100644
> --- a/drivers/net/gve/gve_rx.c
> +++ b/drivers/net/gve/gve_rx.c
> @@ -414,11 +414,91 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
>  	return err;
>  }
>
> +static int
> +gve_rxq_mbufs_alloc(struct gve_rx_queue *rxq)
> +{
> +	struct rte_mbuf *nmb;
> +	uint16_t i;
> +	int diag;
> +
> +	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
> +	if (diag < 0) {
> +		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
> +			nmb = rte_pktmbuf_alloc(rxq->mpool);
> +			if (!nmb)
> +				break;
> +			rxq->sw_ring[i] = nmb;
> +		}
> +		if (i < rxq->nb_rx_desc - 1)
> +			return -ENOMEM;
> +	}
> +	rxq->nb_avail = 0;
> +	rxq->next_avail = rxq->nb_rx_desc - 1;
> +
> +	for (i = 0; i < rxq->nb_rx_desc; i++) {
> +		if (rxq->is_gqi_qpl) {
> +			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(i * PAGE_SIZE);
> +		} else {
> +			if (i == rxq->nb_rx_desc - 1)
> +				break;
> +			nmb = rxq->sw_ring[i];
> +			rxq->rx_data_ring[i].addr = rte_cpu_to_be_64(rte_mbuf_data_iova(nmb));
> +		}
> +	}
> +
> +	rte_write32(rte_cpu_to_be_32(rxq->next_avail), rxq->qrx_tail);
> +
> +	return 0;
> +}
> +
> +int
> +gve_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> +{
> +	struct gve_priv *hw = dev->data->dev_private;
> +	struct gve_rx_queue *rxq;
> +	int ret;
> +
> +	if (rx_queue_id >= dev->data->nb_rx_queues)
> +		return -EINVAL;
> +
> +	rxq = dev->data->rx_queues[rx_queue_id];
> +
> +	rxq->qrx_tail = &hw->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];
> +
> +	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);
> +
> +	ret = gve_rxq_mbufs_alloc(rxq);
> +	if (ret != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc Rx queue mbuf");
> +		return ret;
> +	}
> +
> +	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
> +
> +	return 0;
> +}
> +
> +int
> +gve_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> +{
> +	struct gve_rx_queue *rxq;
> +
> +	if (rx_queue_id >= dev->data->nb_rx_queues)
> +		return -EINVAL;
> +
> +	rxq = dev->data->rx_queues[rx_queue_id];
> +	gve_release_rxq_mbufs(rxq);
> +	gve_reset_rxq(rxq);
> +
> +	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
> +
> +	return 0;
> +}
> +
>  void
>  gve_stop_rx_queues(struct rte_eth_dev *dev)
>  {
>  	struct gve_priv *hw = dev->data->dev_private;
> -	struct gve_rx_queue *rxq;
>  	uint16_t i;
>  	int err;
>
> @@ -429,9 +509,13 @@ gve_stop_rx_queues(struct rte_eth_dev *dev)
>  	if (err != 0)
>  		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
>
> -	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		rxq = dev->data->rx_queues[i];
> -		gve_release_rxq_mbufs(rxq);
> -		gve_reset_rxq(rxq);
> -	}
> +	for (i = 0; i < dev->data->nb_rx_queues; i++)
> +		if (gve_rx_queue_stop(dev, i) != 0)
> +			PMD_DRV_LOG(WARNING, "Fail to stop Rx queue %d", i);
> +}
> +
> +void
> +gve_set_rx_function(struct rte_eth_dev *dev)
> +{
> +	dev->rx_pkt_burst = gve_rx_burst;
>  }
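One subtlety in gve_rxq_mbufs_alloc() worth noting: rte_pktmbuf_alloc_bulk()
is all-or-nothing, so the failure path falls back to per-mbuf allocation
and only gives up when it cannot populate nb_rx_desc - 1 slots; one
descriptor stays unused, presumably the usual full-versus-empty ring
disambiguation. A condensed, self-contained sketch of the pattern (the
function and parameter names here are illustrative):

#include <errno.h>
#include <rte_mbuf.h>

/* Fill a software ring: try one bulk grab, then fall back to per-mbuf
 * allocation, treating nb_desc - 1 filled slots as the minimum usable
 * level (one slot stays empty by design).
 */
static int
fill_sw_ring(struct rte_mempool *mp, struct rte_mbuf **ring, uint16_t nb_desc)
{
	uint16_t i;

	if (rte_pktmbuf_alloc_bulk(mp, ring, nb_desc) == 0)
		return 0;

	for (i = 0; i < nb_desc - 1; i++) {
		ring[i] = rte_pktmbuf_alloc(mp);
		if (ring[i] == NULL)
			return -ENOMEM;
	}
	return 0;
}
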
> diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
> index 1d6b21359c..236aefd2a8 100644
> --- a/drivers/net/gve/gve_rx_dqo.c
> +++ b/drivers/net/gve/gve_rx_dqo.c
> @@ -333,11 +333,92 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
>  	return err;
>  }
>
> +static int
> +gve_rxq_mbufs_alloc_dqo(struct gve_rx_queue *rxq)
> +{
> +	struct rte_mbuf *nmb;
> +	uint16_t i;
> +	int diag;
> +
> +	diag = rte_pktmbuf_alloc_bulk(rxq->mpool, &rxq->sw_ring[0], rxq->nb_rx_desc);
> +	if (diag < 0) {
> +		rxq->stats.no_mbufs_bulk++;
> +		for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
> +			nmb = rte_pktmbuf_alloc(rxq->mpool);
> +			if (!nmb)
> +				break;
> +			rxq->sw_ring[i] = nmb;
> +		}
> +		if (i < rxq->nb_rx_desc - 1) {
> +			rxq->stats.no_mbufs += rxq->nb_rx_desc - 1 - i;
> +			return -ENOMEM;
> +		}
> +	}
> +
> +	for (i = 0; i < rxq->nb_rx_desc; i++) {
> +		if (i == rxq->nb_rx_desc - 1)
> +			break;
> +		nmb = rxq->sw_ring[i];
> +		rxq->rx_ring[i].buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
> +		rxq->rx_ring[i].buf_id = rte_cpu_to_le_16(i);
> +	}
> +
> +	rxq->nb_rx_hold = 0;
> +	rxq->bufq_tail = rxq->nb_rx_desc - 1;
> +
> +	rte_write32(rxq->bufq_tail, rxq->qrx_tail);
> +
> +	return 0;
> +}
> +
> +int
> +gve_rx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> +{
> +	struct gve_priv *hw = dev->data->dev_private;
> +	struct gve_rx_queue *rxq;
> +	int ret;
> +
> +	if (rx_queue_id >= dev->data->nb_rx_queues)
> +		return -EINVAL;
> +
> +	rxq = dev->data->rx_queues[rx_queue_id];
> +
> +	rxq->qrx_tail = &hw->db_bar2[rte_be_to_cpu_32(rxq->qres->db_index)];
> +
> +	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), rxq->ntfy_addr);
> +
> +	ret = gve_rxq_mbufs_alloc_dqo(rxq);
> +	if (ret != 0) {
> +		PMD_DRV_LOG(ERR, "Failed to alloc Rx queue mbuf");
> +		return ret;
> +	}
> +
> +	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
> +
> +	return 0;
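The DQO refill also rings its doorbell differently from GQI: GQI converts
the tail to big-endian before the register write, while DQO writes the
value unswapped. A small sketch of the asymmetry, with the register
endianness inferred from the two tail writes in this patch rather than
from a datasheet:

#include <stdbool.h>
#include <rte_byteorder.h>
#include <rte_io.h>

/* Mirrors gve_rxq_mbufs_alloc() vs gve_rxq_mbufs_alloc_dqo(): GQI
 * doorbells are written big-endian, DQO doorbells as-is (inferred
 * from usage).
 */
static inline void
ring_rx_doorbell(volatile void *db, uint32_t tail, bool is_gqi)
{
	if (is_gqi)
		rte_write32(rte_cpu_to_be_32(tail), db);
	else
		rte_write32(tail, db);
}
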
> +}
> +
> +int
> +gve_rx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> +{
> +	struct gve_rx_queue *rxq;
> +
> +	if (rx_queue_id >= dev->data->nb_rx_queues)
> +		return -EINVAL;
> +
> +	rxq = dev->data->rx_queues[rx_queue_id];
> +	gve_release_rxq_mbufs_dqo(rxq);
> +	gve_reset_rxq_dqo(rxq);
> +
> +	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
> +
> +	return 0;
> +}
> +
>  void
>  gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
>  {
>  	struct gve_priv *hw = dev->data->dev_private;
> -	struct gve_rx_queue *rxq;
>  	uint16_t i;
>  	int err;
>
> @@ -345,9 +426,13 @@ gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
>  	if (err != 0)
>  		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
>
> -	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		rxq = dev->data->rx_queues[i];
> -		gve_release_rxq_mbufs_dqo(rxq);
> -		gve_reset_rxq_dqo(rxq);
> -	}
> +	for (i = 0; i < dev->data->nb_rx_queues; i++)
> +		if (gve_rx_queue_stop_dqo(dev, i) != 0)
> +			PMD_DRV_LOG(WARNING, "Fail to stop Rx queue %d", i);
> +}
> +
> +void
> +gve_set_rx_function_dqo(struct rte_eth_dev *dev)
> +{
> +	dev->rx_pkt_burst = gve_rx_burst_dqo;
>  }
> diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
> index 13dc807623..2e0d001109 100644
> --- a/drivers/net/gve/gve_tx.c
> +++ b/drivers/net/gve/gve_tx.c
> @@ -664,11 +664,49 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
>  	return err;
>  }
>
> +int
> +gve_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> +{
> +	struct gve_priv *hw = dev->data->dev_private;
> +	struct gve_tx_queue *txq;
> +
> +	if (tx_queue_id >= dev->data->nb_tx_queues)
> +		return -EINVAL;
> +
> +	txq = dev->data->tx_queues[tx_queue_id];
> +
> +	txq->qtx_tail = &hw->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
> +	txq->qtx_head =
> +		&hw->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];
> +
> +	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
> +
> +	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
> +
> +	return 0;
> +}
> +
> +int
> +gve_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> +{
> +	struct gve_tx_queue *txq;
> +
> +	if (tx_queue_id >= dev->data->nb_tx_queues)
> +		return -EINVAL;
> +
> +	txq = dev->data->tx_queues[tx_queue_id];
> +	gve_release_txq_mbufs(txq);
> +	gve_reset_txq(txq);
> +
> +	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
> +
> +	return 0;
> +}
> +
>  void
>  gve_stop_tx_queues(struct rte_eth_dev *dev)
>  {
>  	struct gve_priv *hw = dev->data->dev_private;
> -	struct gve_tx_queue *txq;
>  	uint16_t i;
>  	int err;
>
> @@ -679,9 +717,13 @@ gve_stop_tx_queues(struct rte_eth_dev *dev)
>  	if (err != 0)
>  		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
>
> -	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		txq = dev->data->tx_queues[i];
> -		gve_release_txq_mbufs(txq);
> -		gve_reset_txq(txq);
> -	}
> +	for (i = 0; i < dev->data->nb_tx_queues; i++)
> +		if (gve_tx_queue_stop(dev, i) != 0)
> +			PMD_DRV_LOG(WARNING, "Fail to stop Tx queue %d", i);
> +}
> +
> +void
> +gve_set_tx_function(struct rte_eth_dev *dev)
> +{
> +	dev->tx_pkt_burst = gve_tx_burst;
>  }
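gve_tx_queue_start() derives both device pointers from the per-queue
resources returned by the admin queue: db_index selects the doorbell slot
in BAR2 (qtx_tail) and counter_index selects the completion counter the
device advances (qtx_head); both indices arrive big-endian. A
type-simplified sketch of that lookup (the struct and parameter names
below are illustrative, not the driver's own):

#include <stdint.h>
#include <rte_byteorder.h>

/* Illustrative stand-in for the queue resources handed back by the
 * admin queue; the real type lives in the gve base code.
 */
struct txq_resources {
	rte_be32_t db_index;      /* doorbell slot within BAR2 */
	rte_be32_t counter_index; /* completion ("head") counter slot */
};

static void
map_txq_pointers(volatile uint32_t *db_bar2, rte_be32_t *cnt_array,
		 const struct txq_resources *qres,
		 volatile uint32_t **qtx_tail, rte_be32_t **qtx_head)
{
	*qtx_tail = &db_bar2[rte_be_to_cpu_32(qres->db_index)];
	*qtx_head = &cnt_array[rte_be_to_cpu_32(qres->counter_index)];
}
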
> diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
> index b38eeaea4b..e0d144835b 100644
> --- a/drivers/net/gve/gve_tx_dqo.c
> +++ b/drivers/net/gve/gve_tx_dqo.c
> @@ -373,11 +373,49 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
>  	return err;
>  }
>
> +int
> +gve_tx_queue_start_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> +{
> +	struct gve_priv *hw = dev->data->dev_private;
> +	struct gve_tx_queue *txq;
> +
> +	if (tx_queue_id >= dev->data->nb_tx_queues)
> +		return -EINVAL;
> +
> +	txq = dev->data->tx_queues[tx_queue_id];
> +
> +	txq->qtx_tail = &hw->db_bar2[rte_be_to_cpu_32(txq->qres->db_index)];
> +	txq->qtx_head =
> +		&hw->cnt_array[rte_be_to_cpu_32(txq->qres->counter_index)];
> +
> +	rte_write32(rte_cpu_to_be_32(GVE_IRQ_MASK), txq->ntfy_addr);
> +
> +	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
> +
> +	return 0;
> +}
> +
> +int
> +gve_tx_queue_stop_dqo(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> +{
> +	struct gve_tx_queue *txq;
> +
> +	if (tx_queue_id >= dev->data->nb_tx_queues)
> +		return -EINVAL;
> +
> +	txq = dev->data->tx_queues[tx_queue_id];
> +	gve_release_txq_mbufs_dqo(txq);
> +	gve_reset_txq_dqo(txq);
> +
> +	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
> +
> +	return 0;
> +}
> +
>  void
>  gve_stop_tx_queues_dqo(struct rte_eth_dev *dev)
>  {
>  	struct gve_priv *hw = dev->data->dev_private;
> -	struct gve_tx_queue *txq;
>  	uint16_t i;
>  	int err;
>
> @@ -385,9 +423,13 @@ gve_stop_tx_queues_dqo(struct rte_eth_dev *dev)
>  	if (err != 0)
>  		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
>
> -	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		txq = dev->data->tx_queues[i];
> -		gve_release_txq_mbufs_dqo(txq);
> -		gve_reset_txq_dqo(txq);
> -	}
> +	for (i = 0; i < dev->data->nb_tx_queues; i++)
> +		if (gve_tx_queue_stop_dqo(dev, i) != 0)
> +			PMD_DRV_LOG(WARNING, "Fail to stop Tx queue %d", i);
> +}
> +
> +void
> +gve_set_tx_function_dqo(struct rte_eth_dev *dev)
> +{
> +	dev->tx_pkt_burst = gve_tx_burst_dqo;
>  }
> --
> 2.34.1
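One pattern common to all four start paths: before a queue is marked
started, GVE_IRQ_MASK is written big-endian to the queue's notify address.
The patch does not spell out the register semantics; a plausible reading
is that this masks the queue's interrupt so the PMD can run in pure
polling mode. A sketch under that assumption:

#include <rte_byteorder.h>
#include <rte_io.h>

/* Assumption: writing GVE_IRQ_MASK to the notify address masks the
 * queue's interrupt (polling mode). Inferred from its usage in the
 * four queue-start paths above, not from a register spec.
 */
static inline void
gve_mask_queue_irq(volatile void *ntfy_addr, uint32_t irq_mask)
{
	rte_write32(rte_cpu_to_be_32(irq_mask), ntfy_addr);
}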