From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com,
	ferruh.yigit@amd.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,
	Rushil Gupta <rushilg@google.com>,
	Joshua Washington <joshwash@google.com>,
	Jeroen de Borst <jeroendb@google.com>
Subject: [PATCH 04/10] net/gve: support queue release and stop for DQO
Date: Thu, 13 Apr 2023 14:16:44 +0800
Message-ID: <20230413061650.796940-5-junfeng.guo@intel.com>
In-Reply-To: <20230413061650.796940-1-junfeng.guo@intel.com>

Add support for the following queue operations for the DQO queue format:
 - gve_tx_queue_release_dqo
 - gve_rx_queue_release_dqo
 - gve_stop_tx_queues_dqo
 - gve_stop_rx_queues_dqo
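
For context (not part of the diff below): applications do not call these
helpers directly; they are reached through the generic ethdev teardown
path. A minimal illustrative sketch follows, where teardown_port() and
port_id are hypothetical names for a started gve port using the DQO
format:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    teardown_port(uint16_t port_id)
    {
    	/* rte_eth_dev_stop() reaches gve_stop_{tx,rx}_queues(), which
    	 * dispatch to gve_stop_{tx,rx}_queues_dqo() on non-GQI devices.
    	 */
    	if (rte_eth_dev_stop(port_id) != 0)
    		printf("failed to stop port %u\n", port_id);

    	/* rte_eth_dev_close() reaches gve_dev_close(), which releases
    	 * every Tx/Rx queue via the new *_release_dqo callbacks.
    	 */
    	if (rte_eth_dev_close(port_id) != 0)
    		printf("failed to close port %u\n", port_id);
    }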

Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
Signed-off-by: Rushil Gupta <rushilg@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
---
 drivers/net/gve/gve_ethdev.c | 18 +++++++++---
 drivers/net/gve/gve_ethdev.h | 12 ++++++++
 drivers/net/gve/gve_rx.c     |  3 ++
 drivers/net/gve/gve_rx_dqo.c | 57 ++++++++++++++++++++++++++++++++++++
 drivers/net/gve/gve_tx.c     |  3 ++
 drivers/net/gve/gve_tx_dqo.c | 55 ++++++++++++++++++++++++++++++++++
 6 files changed, 144 insertions(+), 4 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index fc60db63c5..340315a1a3 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -292,11 +292,19 @@ gve_dev_close(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "Failed to stop dev.");
 	}
 
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		gve_tx_queue_release(dev, i);
+	if (gve_is_gqi(priv)) {
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			gve_tx_queue_release(dev, i);
+
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			gve_rx_queue_release(dev, i);
+	} else {
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			gve_tx_queue_release_dqo(dev, i);
 
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		gve_rx_queue_release(dev, i);
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			gve_rx_queue_release_dqo(dev, i);
+	}
 
 	gve_free_qpls(priv);
 	rte_free(priv->adminq);
@@ -578,6 +586,8 @@ static const struct eth_dev_ops gve_eth_dev_ops_dqo = {
 	.dev_infos_get        = gve_dev_info_get,
 	.rx_queue_setup       = gve_rx_queue_setup_dqo,
 	.tx_queue_setup       = gve_tx_queue_setup_dqo,
+	.rx_queue_release     = gve_rx_queue_release_dqo,
+	.tx_queue_release     = gve_tx_queue_release_dqo,
 	.link_update          = gve_link_update,
 	.stats_get            = gve_dev_stats_get,
 	.stats_reset          = gve_dev_stats_reset,
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index cb8cd62886..c8e1dd1435 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -378,4 +378,16 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 		       uint16_t nb_desc, unsigned int socket_id,
 		       const struct rte_eth_txconf *conf);
 
+void
+gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
+
+void
+gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid);
+
+void
+gve_stop_tx_queues_dqo(struct rte_eth_dev *dev);
+
+void
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev);
+
 #endif /* _GVE_ETHDEV_H_ */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 8d8f94efff..3dd3f578f9 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -359,6 +359,9 @@ gve_stop_rx_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 	int err;
 
+	if (!gve_is_gqi(hw))
+		return gve_stop_rx_queues_dqo(dev);
+
 	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index c419c4dd2f..7f58844839 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -7,6 +7,38 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_release_rxq_mbufs_dqo(struct gve_rx_queue *rxq)
+{
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		if (rxq->sw_ring[i]) {
+			rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+			rxq->sw_ring[i] = NULL;
+		}
+	}
+
+	rxq->nb_avail = rxq->nb_rx_desc;
+}
+
+void
+gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
+{
+	struct gve_rx_queue *q = dev->data->rx_queues[qid];
+
+	if (q == NULL)
+		return;
+
+	gve_release_rxq_mbufs_dqo(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->compl_ring_mz);
+	rte_memzone_free(q->mz);
+	rte_memzone_free(q->qres_mz);
+	q->qres = NULL;
+	rte_free(q);
+}
+
 static void
 gve_reset_rxq_dqo(struct gve_rx_queue *rxq)
 {
@@ -56,6 +88,12 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	nb_desc = hw->rx_desc_cnt;
 
+	/* Free memory if needed */
+	if (dev->data->rx_queues[queue_id]) {
+		gve_rx_queue_release_dqo(dev, queue_id);
+		dev->data->rx_queues[queue_id] = NULL;
+	}
+
 	/* Allocate the RX queue data structure. */
 	rxq = rte_zmalloc_socket("gve rxq",
 				 sizeof(struct gve_rx_queue),
@@ -154,3 +192,22 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	rte_free(rxq);
 	return err;
 }
+
+void
+gve_stop_rx_queues_dqo(struct rte_eth_dev *dev)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_rx_queue *rxq;
+	uint16_t i;
+	int err;
+
+	err = gve_adminq_destroy_rx_queues(hw, dev->data->nb_rx_queues);
+	if (err != 0)
+		PMD_DRV_LOG(WARNING, "failed to destroy rxqs");
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		rxq = dev->data->rx_queues[i];
+		gve_release_rxq_mbufs_dqo(rxq);
+		gve_reset_rxq_dqo(rxq);
+	}
+}
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index fee3b939c7..13dc807623 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -672,6 +672,9 @@ gve_stop_tx_queues(struct rte_eth_dev *dev)
 	uint16_t i;
 	int err;
 
+	if (!gve_is_gqi(hw))
+		return gve_stop_tx_queues_dqo(dev);
+
 	err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
 	if (err != 0)
 		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 22d20ff16f..ea6d5ff85e 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -6,6 +6,36 @@
 #include "gve_ethdev.h"
 #include "base/gve_adminq.h"
 
+static inline void
+gve_release_txq_mbufs_dqo(struct gve_tx_queue *txq)
+{
+	uint16_t i;
+
+	for (i = 0; i < txq->sw_size; i++) {
+		if (txq->sw_ring[i]) {
+			rte_pktmbuf_free_seg(txq->sw_ring[i]);
+			txq->sw_ring[i] = NULL;
+		}
+	}
+}
+
+void
+gve_tx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
+{
+	struct gve_tx_queue *q = dev->data->tx_queues[qid];
+
+	if (q == NULL)
+		return;
+
+	gve_release_txq_mbufs_dqo(q);
+	rte_free(q->sw_ring);
+	rte_memzone_free(q->mz);
+	rte_memzone_free(q->compl_ring_mz);
+	rte_memzone_free(q->qres_mz);
+	q->qres = NULL;
+	rte_free(q);
+}
+
 static int
 check_tx_thresh_dqo(uint16_t nb_desc, uint16_t tx_rs_thresh,
 		    uint16_t tx_free_thresh)
@@ -91,6 +121,12 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	}
 	nb_desc = hw->tx_desc_cnt;
 
+	/* Free memory if needed. */
+	if (dev->data->tx_queues[queue_id]) {
+		gve_tx_queue_release_dqo(dev, queue_id);
+		dev->data->tx_queues[queue_id] = NULL;
+	}
+
 	/* Allocate the TX queue data structure. */
 	txq = rte_zmalloc_socket("gve txq",
 				 sizeof(struct gve_tx_queue),
@@ -183,3 +219,22 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	rte_free(txq);
 	return err;
 }
+
+void
+gve_stop_tx_queues_dqo(struct rte_eth_dev *dev)
+{
+	struct gve_priv *hw = dev->data->dev_private;
+	struct gve_tx_queue *txq;
+	uint16_t i;
+	int err;
+
+	err = gve_adminq_destroy_tx_queues(hw, dev->data->nb_tx_queues);
+	if (err != 0)
+		PMD_DRV_LOG(WARNING, "failed to destroy txqs");
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		txq = dev->data->tx_queues[i];
+		gve_release_txq_mbufs_dqo(txq);
+		gve_reset_txq_dqo(txq);
+	}
+}
-- 
2.34.1

