DPDK patches and discussions
From: Joshua Washington <joshwash@google.com>
To: Jeroen de Borst <jeroendb@google.com>,
	Rushil Gupta <rushilg@google.com>,
	 Joshua Washington <joshwash@google.com>
Cc: dev@dpdk.org, Ferruh Yigit <ferruh.yigit@amd.com>,
	 Harshitha Ramamurthy <hramamurthy@google.com>
Subject: [PATCH 3/4] net/gve: add min ring size support
Date: Wed, 17 Jul 2024 10:56:18 -0700
Message-ID: <20240717175619.3159026-4-joshwash@google.com>
In-Reply-To: <20240717175619.3159026-1-joshwash@google.com>

This change adds support for parsing the minimum ring size from the
modify_ring_size device option. Like the maximum ring size, this field
will be used to allow altering the ring size on the GQ driver.

Note that passing the minimum ring sizes from the device is optional.
If the device does not pass them, they are set to static default
values.
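
For illustration only (not part of this patch): whether the minimum
ring size is present is inferred purely from the option length the
device reports. A minimal, self-contained sketch of that check, using
simplified stand-in types rather than the actual gve_adminq
definitions, might look like:

  #include <stddef.h>
  #include <stdint.h>

  struct ring_size_bound {
          uint16_t rx;
          uint16_t tx;
  };

  struct modify_ring_option {
          uint32_t supported_features_mask;
          struct ring_size_bound max_ring_size;
          struct ring_size_bound min_ring_size; /* optional trailing field */
  };

  /* Older devices send only the first 8 bytes (no minimum sizes); newer
   * devices send the full 12-byte option, so the trailing field is
   * treated as present only when the reported length equals the full
   * struct size. */
  static int option_has_min_ring_size(size_t option_length)
  {
          return option_length == sizeof(struct modify_ring_option);
  }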

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
 drivers/net/gve/base/gve_adminq.c | 45 ++++++++++++++++++++-----------
 drivers/net/gve/base/gve_adminq.h | 13 ++++++---
 drivers/net/gve/gve_ethdev.c      | 28 ++++++++++++++-----
 drivers/net/gve/gve_ethdev.h      | 37 ++++++++++++++++---------
 drivers/net/gve/gve_rx.c          |  6 ++---
 drivers/net/gve/gve_tx.c          |  6 ++---
 6 files changed, 92 insertions(+), 43 deletions(-)

diff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c
index c25fefbd0f..72c05c8237 100644
--- a/drivers/net/gve/base/gve_adminq.c
+++ b/drivers/net/gve/base/gve_adminq.c
@@ -15,6 +15,9 @@
 
 #define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver."
 
+static void gve_set_min_desc_cnt(struct gve_priv *priv,
+	struct gve_device_option_modify_ring *dev_op_modify_ring);
+
 static
 struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
 					      struct gve_device_option *option)
@@ -107,7 +110,9 @@ void gve_parse_device_option(struct gve_priv *priv,
 		*dev_op_dqo_rda = RTE_PTR_ADD(option, sizeof(*option));
 		break;
 	case GVE_DEV_OPT_ID_MODIFY_RING:
-		if (option_length < sizeof(**dev_op_modify_ring) ||
+		/* Min ring size bound is optional. */
+		if (option_length < (sizeof(**dev_op_modify_ring) -
+			sizeof(struct gve_ring_size_bound)) ||
 		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
 			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
 				    "Modify Ring",
@@ -123,6 +128,10 @@ void gve_parse_device_option(struct gve_priv *priv,
 				    "Modify Ring");
 		}
 		*dev_op_modify_ring = RTE_PTR_ADD(option, sizeof(*option));
+
+		/* Min ring size included; set the minimum ring size. */
+		if (option_length == sizeof(**dev_op_modify_ring))
+			gve_set_min_desc_cnt(priv, *dev_op_modify_ring);
 		break;
 	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
 		if (option_length < sizeof(**dev_op_jumbo_frames) ||
@@ -686,16 +695,17 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
 static int gve_set_desc_cnt(struct gve_priv *priv,
 			    struct gve_device_descriptor *descriptor)
 {
-	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
-	if (priv->tx_desc_cnt * sizeof(priv->txqs[0]->tx_desc_ring[0])
+	priv->default_tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+	if (priv->default_tx_desc_cnt * sizeof(priv->txqs[0]->tx_desc_ring[0])
 	    < PAGE_SIZE) {
-		PMD_DRV_LOG(ERR, "Tx desc count %d too low", priv->tx_desc_cnt);
+		PMD_DRV_LOG(ERR, "Tx desc count %d too low",
+			    priv->default_tx_desc_cnt);
 		return -EINVAL;
 	}
-	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
-	if (priv->rx_desc_cnt * sizeof(priv->rxqs[0]->rx_desc_ring[0])
+	priv->default_rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+	if (priv->default_rx_desc_cnt * sizeof(priv->rxqs[0]->rx_desc_ring[0])
 	    < PAGE_SIZE) {
-		PMD_DRV_LOG(ERR, "Rx desc count %d too low", priv->rx_desc_cnt);
+		PMD_DRV_LOG(ERR, "Rx desc count %d too low", priv->default_rx_desc_cnt);
 		return -EINVAL;
 	}
 	return 0;
@@ -706,14 +716,22 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
 		     const struct gve_device_descriptor *descriptor,
 		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
 {
-	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+	priv->default_tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
 	priv->tx_compq_size = be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
-	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+	priv->default_rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
 	priv->rx_bufq_size = be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
 
 	return 0;
 }
 
+static void
+gve_set_min_desc_cnt(struct gve_priv *priv,
+	struct gve_device_option_modify_ring *modify_ring)
+{
+	priv->min_rx_desc_cnt = be16_to_cpu(modify_ring->min_ring_size.rx);
+	priv->min_tx_desc_cnt = be16_to_cpu(modify_ring->min_ring_size.tx);
+}
+
 static void
 gve_set_max_desc_cnt(struct gve_priv *priv,
 	const struct gve_device_option_modify_ring *modify_ring)
@@ -725,8 +743,8 @@ gve_set_max_desc_cnt(struct gve_priv *priv,
 		priv->max_tx_desc_cnt = GVE_MAX_QUEUE_SIZE_DQO;
 		return;
 	}
-	priv->max_rx_desc_cnt = modify_ring->max_rx_ring_size;
-	priv->max_tx_desc_cnt = modify_ring->max_tx_ring_size;
+	priv->max_rx_desc_cnt = be16_to_cpu(modify_ring->max_ring_size.rx);
+	priv->max_tx_desc_cnt = be16_to_cpu(modify_ring->max_ring_size.tx);
 }
 
 static void gve_enable_supported_features(struct gve_priv *priv,
@@ -737,6 +755,7 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 	if (dev_op_modify_ring &&
 	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
 		PMD_DRV_LOG(INFO, "MODIFY RING device option enabled.");
+		/* Min ring size set separately by virtue of it being optional. */
 		gve_set_max_desc_cnt(priv, dev_op_modify_ring);
 	}
 
@@ -819,10 +838,6 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	if (err)
 		goto free_device_descriptor;
 
-	/* Default max to current in case modify ring size option is disabled. */
-	priv->max_tx_desc_cnt = priv->tx_desc_cnt;
-	priv->max_rx_desc_cnt = priv->rx_desc_cnt;
-
 	priv->max_registered_pages =
 				be64_to_cpu(descriptor->max_registered_pages);
 	mtu = be16_to_cpu(descriptor->mtu);
diff --git a/drivers/net/gve/base/gve_adminq.h b/drivers/net/gve/base/gve_adminq.h
index ff69f74d69..6a3d4691b5 100644
--- a/drivers/net/gve/base/gve_adminq.h
+++ b/drivers/net/gve/base/gve_adminq.h
@@ -110,13 +110,20 @@ struct gve_device_option_dqo_rda {
 
 GVE_CHECK_STRUCT_LEN(8, gve_device_option_dqo_rda);
 
+struct gve_ring_size_bound {
+	__be16 rx;
+	__be16 tx;
+};
+
+GVE_CHECK_STRUCT_LEN(4, gve_ring_size_bound);
+
 struct gve_device_option_modify_ring {
 	__be32 supported_features_mask;
-	__be16 max_rx_ring_size;
-	__be16 max_tx_ring_size;
+	struct gve_ring_size_bound max_ring_size;
+	struct gve_ring_size_bound min_ring_size;
 };
 
-GVE_CHECK_STRUCT_LEN(8, gve_device_option_modify_ring);
+GVE_CHECK_STRUCT_LEN(12, gve_device_option_modify_ring);
 
 struct gve_device_option_jumbo_frames {
 	__be32 supported_features_mask;
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index ca92277a68..603644735d 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -508,17 +508,21 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.offloads = 0,
 	};
 
-	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
+	dev_info->default_rxportconf.ring_size = priv->default_rx_desc_cnt;
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
-		.nb_max = gve_is_gqi(priv) ? priv->rx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
-		.nb_min = priv->rx_desc_cnt,
+		.nb_max = gve_is_gqi(priv) ?
+			priv->default_rx_desc_cnt :
+			GVE_MAX_QUEUE_SIZE_DQO,
+		.nb_min = priv->default_rx_desc_cnt,
 		.nb_align = 1,
 	};
 
-	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
+	dev_info->default_txportconf.ring_size = priv->default_tx_desc_cnt;
 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
-		.nb_max = gve_is_gqi(priv) ? priv->tx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
-		.nb_min = priv->tx_desc_cnt,
+		.nb_max = gve_is_gqi(priv) ?
+			priv->default_tx_desc_cnt :
+			GVE_MAX_QUEUE_SIZE_DQO,
+		.nb_min = priv->default_tx_desc_cnt,
 		.nb_align = 1,
 	};
 
@@ -1088,6 +1092,15 @@ gve_setup_device_resources(struct gve_priv *priv)
 	return err;
 }
 
+static void
+gve_set_default_ring_size_bounds(struct gve_priv *priv)
+{
+	priv->max_tx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE;
+	priv->max_rx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE;
+	priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+	priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+}
+
 static int
 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 {
@@ -1106,6 +1119,9 @@ gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 		goto free_adminq;
 	}
 
+	/* Set default descriptor counts */
+	gve_set_default_ring_size_bounds(priv);
+
 	if (skip_describe_device)
 		goto setup_device;
 
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 393a4362c9..c417a0b31c 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -15,19 +15,23 @@
 /* TODO: this is a workaround to ensure that Tx complq is enough */
 #define DQO_TX_MULTIPLIER 4
 
-#define GVE_DEFAULT_RX_FREE_THRESH   64
-#define GVE_DEFAULT_TX_FREE_THRESH   32
-#define GVE_DEFAULT_TX_RS_THRESH     32
-#define GVE_TX_MAX_FREE_SZ          512
+#define GVE_DEFAULT_MAX_RING_SIZE	1024
+#define GVE_DEFAULT_MIN_RX_RING_SIZE	512
+#define GVE_DEFAULT_MIN_TX_RING_SIZE	256
 
-#define GVE_RX_BUF_ALIGN_DQO        128
-#define GVE_RX_MIN_BUF_SIZE_DQO    1024
-#define GVE_RX_MAX_BUF_SIZE_DQO    ((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)
-#define GVE_MAX_QUEUE_SIZE_DQO     4096
+#define GVE_DEFAULT_RX_FREE_THRESH	64
+#define GVE_DEFAULT_TX_FREE_THRESH	32
+#define GVE_DEFAULT_TX_RS_THRESH	32
+#define GVE_TX_MAX_FREE_SZ		512
 
-#define GVE_RX_BUF_ALIGN_GQI       2048
-#define GVE_RX_MIN_BUF_SIZE_GQI    2048
-#define GVE_RX_MAX_BUF_SIZE_GQI    4096
+#define GVE_RX_BUF_ALIGN_DQO		128
+#define GVE_RX_MIN_BUF_SIZE_DQO		1024
+#define GVE_RX_MAX_BUF_SIZE_DQO		((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)
+#define GVE_MAX_QUEUE_SIZE_DQO		4096
+
+#define GVE_RX_BUF_ALIGN_GQI		2048
+#define GVE_RX_MIN_BUF_SIZE_GQI		2048
+#define GVE_RX_MAX_BUF_SIZE_GQI		4096
 
 #define GVE_RSS_HASH_KEY_SIZE 40
 #define GVE_RSS_INDIR_SIZE 128
@@ -234,10 +238,17 @@ struct gve_priv {
 	const struct rte_memzone *cnt_array_mz;
 
 	uint16_t num_event_counters;
+
+	/* TX ring size default and limits. */
+	uint16_t default_tx_desc_cnt;
 	uint16_t max_tx_desc_cnt;
+	uint16_t min_tx_desc_cnt;
+
+	/* RX ring size default and limits. */
+	uint16_t default_rx_desc_cnt;
 	uint16_t max_rx_desc_cnt;
-	uint16_t tx_desc_cnt; /* txq size */
-	uint16_t rx_desc_cnt; /* rxq size */
+	uint16_t min_rx_desc_cnt;
+
 	uint16_t tx_pages_per_qpl;
 
 	/* Only valid for DQO_RDA queue format */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index d2c6920406..43cb368be9 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -304,11 +304,11 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint32_t mbuf_len;
 	int err = 0;
 
-	if (nb_desc != hw->rx_desc_cnt) {
+	if (nb_desc != hw->default_rx_desc_cnt) {
 		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
-			    hw->rx_desc_cnt);
+			    hw->default_rx_desc_cnt);
 	}
-	nb_desc = hw->rx_desc_cnt;
+	nb_desc = hw->default_rx_desc_cnt;
 
 	/* Free memory if needed. */
 	if (dev->data->rx_queues[queue_id]) {
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 70d3ef060c..8c255bd0f2 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -559,11 +559,11 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	uint16_t free_thresh;
 	int err = 0;
 
-	if (nb_desc != hw->tx_desc_cnt) {
+	if (nb_desc != hw->default_tx_desc_cnt) {
 		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
-			    hw->tx_desc_cnt);
+			    hw->default_tx_desc_cnt);
 	}
-	nb_desc = hw->tx_desc_cnt;
+	nb_desc = hw->default_tx_desc_cnt;
 
 	/* Free memory if needed. */
 	if (dev->data->tx_queues[queue_id]) {
-- 
2.45.2.803.g4e1b14247a-goog


Thread overview: 6+ messages
2024-07-17 17:56 [PATCH 0/4] gve GQ ring size modification Joshua Washington
2024-07-17 17:56 ` [PATCH 1/4] net/gve: add ring size device option Joshua Washington
2024-07-17 17:56 ` [PATCH 2/4] net/gve: remove explicit field for Rx pages per QPL Joshua Washington
2024-07-17 17:56 ` Joshua Washington [this message]
2024-07-17 17:56 ` [PATCH 4/4] net/gve: add ability to modify ring size in GQ format Joshua Washington
2024-07-19 18:59 ` [PATCH 0/4] gve GQ ring size modification Ferruh Yigit
