DPDK patches and discussions
* [PATCH 0/4] gve GQ ring size modification
@ 2024-07-17 17:56 Joshua Washington
  2024-07-17 17:56 ` [PATCH 1/4] net/gve: add ring size device option Joshua Washington
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Joshua Washington @ 2024-07-17 17:56 UTC (permalink / raw)
  Cc: dev, Ferruh Yigit, Rushil Gupta, Harshitha Ramamurthy, Joshua Washington

This patch series adds the ability to modify the ring size when using
the GQ queue format for the GVE driver. Before this change, the GQ
driver supported only 1024 descriptors in a ring. With this change, ring
sizes can be as low or as high as the device specifies. If the device
does not specify limits, the minimum ring size is fixed at 512
descriptors for RX and 256 descriptors for TX, while the maximum ring
size is fixed at 1024 for both RX and TX.

Limitations:
  The ring size must be a power of two.

The DQ queue format should remain unaffected by this change.
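
For context, a minimal application-side sketch of how these bounds are
meant to be consumed (pick_rx_ring_size and the requested size are
hypothetical; the ethdev calls are standard DPDK API, and the final
clamp assumes nb_max is itself a power of two, as it is for the GQ
defaults above):

  #include <rte_common.h>
  #include <rte_ethdev.h>

  /* Clamp a requested RX ring size to the limits the PMD reports and
   * round it up to a power of two, as this series requires for GQ. */
  static uint16_t
  pick_rx_ring_size(uint16_t port_id, uint16_t requested)
  {
          struct rte_eth_dev_info info;
          uint32_t n;

          if (rte_eth_dev_info_get(port_id, &info) != 0)
                  return 0;
          n = RTE_MAX(requested, info.rx_desc_lim.nb_min);
          if (!rte_is_power_of_2(n))
                  n = rte_align32pow2(n);
          n = RTE_MIN(n, (uint32_t)info.rx_desc_lim.nb_max);
          return (uint16_t)n;
  }

With the default bounds above, RX requests resolve to 512 or 1024.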

Joshua Washington (4):
  net/gve: add ring size device option
  net/gve: remove explicit field for Rx pages per QPL
  net/gve: add min ring size support
  net/gve: add ability to modify ring size in GQ format

 drivers/net/gve/base/gve_adminq.c | 101 +++++++++++++++++++++++-------
 drivers/net/gve/base/gve_adminq.h |  18 ++++++
 drivers/net/gve/gve_ethdev.c      |  24 +++++--
 drivers/net/gve/gve_ethdev.h      |  42 ++++++++-----
 drivers/net/gve/gve_rx.c          |  12 ++--
 drivers/net/gve/gve_tx.c          |  10 +--
 6 files changed, 154 insertions(+), 53 deletions(-)

-- 
2.45.2.803.g4e1b14247a-goog



* [PATCH 1/4] net/gve: add ring size device option
  2024-07-17 17:56 [PATCH 0/4] gve GQ ring size modification Joshua Washington
@ 2024-07-17 17:56 ` Joshua Washington
  2024-07-17 17:56 ` [PATCH 2/4] net/gve: remove explicit field for Rx pages per QPL Joshua Washington
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Joshua Washington @ 2024-07-17 17:56 UTC (permalink / raw)
  To: Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Harshitha Ramamurthy

This patch allows the device to report the maximum rx and tx ring
sizes to the driver via a device option. The driver can use this
information to allow users to change the ring size of the device when
using the GQ queue format. DQ will be unaffected, as it already allows
DPDK programs to use ring sizes of up to 4096.
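
As a reading aid, a minimal sketch of how a consumer of the parsed
option would pick up the new fields (the helper name is hypothetical;
the fields are declared __be16 in the struct added below, so they are
converted from big-endian before use):

  static void
  store_max_ring_sizes(struct gve_priv *priv,
          const struct gve_device_option_modify_ring *opt)
  {
          /* Wire format is big-endian; convert on load. */
          priv->max_rx_desc_cnt = be16_to_cpu(opt->max_rx_ring_size);
          priv->max_tx_desc_cnt = be16_to_cpu(opt->max_tx_ring_size);
  }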

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
 drivers/net/gve/base/gve_adminq.c | 57 ++++++++++++++++++++++++++++---
 drivers/net/gve/base/gve_adminq.h | 11 ++++++
 drivers/net/gve/gve_ethdev.h      |  2 ++
 3 files changed, 66 insertions(+), 4 deletions(-)

diff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c
index 629d15cfbe..eb8bed0d9b 100644
--- a/drivers/net/gve/base/gve_adminq.c
+++ b/drivers/net/gve/base/gve_adminq.c
@@ -33,6 +33,7 @@ void gve_parse_device_option(struct gve_priv *priv,
 			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
 			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
 			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+			     struct gve_device_option_modify_ring **dev_op_modify_ring,
 			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
 {
 	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
@@ -105,6 +106,24 @@ void gve_parse_device_option(struct gve_priv *priv,
 		}
 		*dev_op_dqo_rda = RTE_PTR_ADD(option, sizeof(*option));
 		break;
+	case GVE_DEV_OPT_ID_MODIFY_RING:
+		if (option_length < sizeof(**dev_op_modify_ring) ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
+				    "Modify Ring",
+				    (int)sizeof(**dev_op_modify_ring),
+				    GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+				    option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_modify_ring)) {
+			PMD_DRV_LOG(WARNING,
+				    GVE_DEVICE_OPTION_TOO_BIG_FMT,
+				    "Modify Ring");
+		}
+		*dev_op_modify_ring = RTE_PTR_ADD(option, sizeof(*option));
+		break;
 	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
 		if (option_length < sizeof(**dev_op_jumbo_frames) ||
 		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
@@ -139,6 +158,7 @@ gve_process_device_options(struct gve_priv *priv,
 			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
 			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
 			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+			   struct gve_device_option_modify_ring **dev_op_modify_ring,
 			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
 {
 	const int num_options = be16_to_cpu(descriptor->num_device_options);
@@ -159,7 +179,8 @@ gve_process_device_options(struct gve_priv *priv,
 
 		gve_parse_device_option(priv, dev_opt,
 					dev_op_gqi_rda, dev_op_gqi_qpl,
-					dev_op_dqo_rda, dev_op_jumbo_frames);
+					dev_op_dqo_rda, dev_op_modify_ring,
+					dev_op_jumbo_frames);
 		dev_opt = next_opt;
 	}
 
@@ -693,11 +714,32 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
 	return 0;
 }
 
+static void
+gve_set_max_desc_cnt(struct gve_priv *priv,
+	const struct gve_device_option_modify_ring *modify_ring)
+{
+	if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+		PMD_DRV_LOG(DEBUG, "Overriding max ring size from device for DQ "
+			    "queue format to 4096.\n");
+		priv->max_rx_desc_cnt = GVE_MAX_QUEUE_SIZE_DQO;
+		priv->max_tx_desc_cnt = GVE_MAX_QUEUE_SIZE_DQO;
+		return;
+	}
+	priv->max_rx_desc_cnt = modify_ring->max_rx_ring_size;
+	priv->max_tx_desc_cnt = modify_ring->max_tx_ring_size;
+}
+
 static void gve_enable_supported_features(struct gve_priv *priv,
-					  u32 supported_features_mask,
-					  const struct gve_device_option_jumbo_frames
-						  *dev_op_jumbo_frames)
+	u32 supported_features_mask,
+	const struct gve_device_option_modify_ring *dev_op_modify_ring,
+	const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames)
 {
+	if (dev_op_modify_ring &&
+	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+		PMD_DRV_LOG(INFO, "MODIFY RING device option enabled.");
+		gve_set_max_desc_cnt(priv, dev_op_modify_ring);
+	}
+
 	/* Before control reaches this point, the page-size-capped max MTU from
 	 * the gve_device_descriptor field has already been stored in
 	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
@@ -712,6 +754,7 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
 	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
+	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
 	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
 	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
 	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -739,6 +782,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 
 	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
 					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
+					 &dev_op_modify_ring,
 					 &dev_op_jumbo_frames);
 	if (err)
 		goto free_device_descriptor;
@@ -775,6 +819,10 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	if (err)
 		goto free_device_descriptor;
 
+	/* Default max to current in case modify ring size option is disabled. */
+	priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+	priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+
 	priv->max_registered_pages =
 				be64_to_cpu(descriptor->max_registered_pages);
 	mtu = be16_to_cpu(descriptor->mtu);
@@ -800,6 +848,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
 
 	gve_enable_supported_features(priv, supported_features_mask,
+				      dev_op_modify_ring,
 				      dev_op_jumbo_frames);
 
 free_device_descriptor:
diff --git a/drivers/net/gve/base/gve_adminq.h b/drivers/net/gve/base/gve_adminq.h
index 24abd945cc..ff69f74d69 100644
--- a/drivers/net/gve/base/gve_adminq.h
+++ b/drivers/net/gve/base/gve_adminq.h
@@ -110,6 +110,14 @@ struct gve_device_option_dqo_rda {
 
 GVE_CHECK_STRUCT_LEN(8, gve_device_option_dqo_rda);
 
+struct gve_device_option_modify_ring {
+	__be32 supported_features_mask;
+	__be16 max_rx_ring_size;
+	__be16 max_tx_ring_size;
+};
+
+GVE_CHECK_STRUCT_LEN(8, gve_device_option_modify_ring);
+
 struct gve_device_option_jumbo_frames {
 	__be32 supported_features_mask;
 	__be16 max_mtu;
@@ -131,6 +139,7 @@ enum gve_dev_opt_id {
 	GVE_DEV_OPT_ID_GQI_RDA = 0x2,
 	GVE_DEV_OPT_ID_GQI_QPL = 0x3,
 	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+	GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
 	GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
 };
 
@@ -139,10 +148,12 @@ enum gve_dev_opt_req_feat_mask {
 	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+	GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
 	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
 };
 
 enum gve_sup_feature_mask {
+	GVE_SUP_MODIFY_RING_MASK = 1 << 0,
 	GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
 };
 
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index ca8c6404fd..1731c2ab93 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -234,6 +234,8 @@ struct gve_priv {
 	const struct rte_memzone *cnt_array_mz;
 
 	uint16_t num_event_counters;
+	uint16_t max_tx_desc_cnt;
+	uint16_t max_rx_desc_cnt;
 	uint16_t tx_desc_cnt; /* txq size */
 	uint16_t rx_desc_cnt; /* rxq size */
 	uint16_t tx_pages_per_qpl; /* tx buffer length */
-- 
2.45.2.803.g4e1b14247a-goog



* [PATCH 2/4] net/gve: remove explicit field for Rx pages per QPL
  2024-07-17 17:56 [PATCH 0/4] gve GQ ring size modification Joshua Washington
  2024-07-17 17:56 ` [PATCH 1/4] net/gve: add ring size device option Joshua Washington
@ 2024-07-17 17:56 ` Joshua Washington
  2024-07-17 17:56 ` [PATCH 3/4] net/gve: add min ring size support Joshua Washington
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Joshua Washington @ 2024-07-17 17:56 UTC (permalink / raw)
  To: Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Harshitha Ramamurthy

priv->rx_data_slot_cnt holds the number of pages per RX QPL as passed
by the device. With the modify ring size feature, this field no longer
makes sense: on GQ there is a clearly defined 1:1 relationship between
the ring size and the number of pages per RX QPL. Once a user can
modify the ring size, the ring size dictates the number of pages in the
QPL, and each QPL "knows" how many pages it holds, so the
device-reported value is redundant.

tx_pages_per_qpl is unaffected by this because TX rings use a static
number of pages per QPL.
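
As a worked example of that 1:1 relationship (assuming the 4 KiB QPL
page size used by the driver):

  /* GQ RX QPL sizing after this patch:
   *   qpl_bytes = nb_desc * 4096
   *   nb_desc = 1024 -> 4 MiB registered per RX queue
   *   nb_desc =  512 -> 2 MiB registered per RX queue
   */

The ring size alone determines the QPL footprint, so a separately
stored page count has nothing left to describe.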

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
 drivers/net/gve/base/gve_adminq.c | 7 -------
 drivers/net/gve/gve_ethdev.h      | 3 +--
 drivers/net/gve/gve_rx.c          | 2 +-
 3 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c
index eb8bed0d9b..c25fefbd0f 100644
--- a/drivers/net/gve/base/gve_adminq.c
+++ b/drivers/net/gve/base/gve_adminq.c
@@ -837,14 +837,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	PMD_DRV_LOG(INFO, "MAC addr: " RTE_ETHER_ADDR_PRT_FMT,
 		    RTE_ETHER_ADDR_BYTES(&priv->dev_addr));
 	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
-	priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
 
-	if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
-		PMD_DRV_LOG(ERR,
-			    "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d",
-			    priv->rx_data_slot_cnt);
-		priv->rx_desc_cnt = priv->rx_data_slot_cnt;
-	}
 	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
 
 	gve_enable_supported_features(priv, supported_features_mask,
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 1731c2ab93..393a4362c9 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -238,8 +238,7 @@ struct gve_priv {
 	uint16_t max_rx_desc_cnt;
 	uint16_t tx_desc_cnt; /* txq size */
 	uint16_t rx_desc_cnt; /* rxq size */
-	uint16_t tx_pages_per_qpl; /* tx buffer length */
-	uint16_t rx_data_slot_cnt; /* rx buffer length */
+	uint16_t tx_pages_per_qpl;
 
 	/* Only valid for DQO_RDA queue format */
 	uint16_t tx_compq_size; /* tx completion queue size */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 41987ec870..d2c6920406 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -386,7 +386,7 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	/* Allocate and register QPL for the queue. */
 	if (rxq->is_gqi_qpl) {
 		rxq->qpl = gve_setup_queue_page_list(hw, queue_id, true,
-						     hw->rx_data_slot_cnt);
+						     nb_desc);
 		if (!rxq->qpl) {
 			PMD_DRV_LOG(ERR,
 				    "Failed to alloc rx qpl for queue %hu.",
-- 
2.45.2.803.g4e1b14247a-goog



* [PATCH 3/4] net/gve: add min ring size support
  2024-07-17 17:56 [PATCH 0/4] gve GQ ring size modification Joshua Washington
  2024-07-17 17:56 ` [PATCH 1/4] net/gve: add ring size device option Joshua Washington
  2024-07-17 17:56 ` [PATCH 2/4] net/gve: remove explicit field for Rx pages per QPL Joshua Washington
@ 2024-07-17 17:56 ` Joshua Washington
  2024-07-17 17:56 ` [PATCH 4/4] net/gve: add ability to modify ring size in GQ format Joshua Washington
  2024-07-19 18:59 ` [PATCH 0/4] gve GQ ring size modification Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Joshua Washington @ 2024-07-17 17:56 UTC (permalink / raw)
  To: Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Harshitha Ramamurthy

This change adds support for parsing the minimum ring size from the
modify_ring_size device option. Like the maximum ring size, this field
helps enable altering the ring size on the GQ driver.

Note that passing the minimum ring size from the device is optional. If
the device does not pass minimum ring sizes, they are set to static
default values.
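
A sketch of the length-based detection this patch uses (the two sizes
follow from the structs in the diff below):

  /* option_length selects which layout the device sent:
   *    8 bytes: supported_features_mask + max_ring_size             (no min)
   *   12 bytes: supported_features_mask + max_ring_size + min_ring_size
   */
  if (option_length == sizeof(struct gve_device_option_modify_ring))
          gve_set_min_desc_cnt(priv, *dev_op_modify_ring); /* min present */
  /* otherwise the static GVE_DEFAULT_MIN_{RX,TX}_RING_SIZE values set
   * during init remain in effect */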

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
 drivers/net/gve/base/gve_adminq.c | 45 ++++++++++++++++++++-----------
 drivers/net/gve/base/gve_adminq.h | 13 ++++++---
 drivers/net/gve/gve_ethdev.c      | 28 ++++++++++++++-----
 drivers/net/gve/gve_ethdev.h      | 37 ++++++++++++++++---------
 drivers/net/gve/gve_rx.c          |  6 ++---
 drivers/net/gve/gve_tx.c          |  6 ++---
 6 files changed, 92 insertions(+), 43 deletions(-)

diff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c
index c25fefbd0f..72c05c8237 100644
--- a/drivers/net/gve/base/gve_adminq.c
+++ b/drivers/net/gve/base/gve_adminq.c
@@ -15,6 +15,9 @@
 
 #define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver."
 
+static void gve_set_min_desc_cnt(struct gve_priv *priv,
+	struct gve_device_option_modify_ring *dev_op_modify_ring);
+
 static
 struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
 					      struct gve_device_option *option)
@@ -107,7 +110,9 @@ void gve_parse_device_option(struct gve_priv *priv,
 		*dev_op_dqo_rda = RTE_PTR_ADD(option, sizeof(*option));
 		break;
 	case GVE_DEV_OPT_ID_MODIFY_RING:
-		if (option_length < sizeof(**dev_op_modify_ring) ||
+		/* Min ring size bound is optional. */
+		if (option_length < (sizeof(**dev_op_modify_ring) -
+			sizeof(struct gve_ring_size_bound)) ||
 		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
 			PMD_DRV_LOG(WARNING, GVE_DEVICE_OPTION_ERROR_FMT,
 				    "Modify Ring",
@@ -123,6 +128,10 @@ void gve_parse_device_option(struct gve_priv *priv,
 				    "Modify Ring");
 		}
 		*dev_op_modify_ring = RTE_PTR_ADD(option, sizeof(*option));
+
+		/* Min ring size included; set the minimum ring size. */
+		if (option_length == sizeof(**dev_op_modify_ring))
+			gve_set_min_desc_cnt(priv, *dev_op_modify_ring);
 		break;
 	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
 		if (option_length < sizeof(**dev_op_jumbo_frames) ||
@@ -686,16 +695,17 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
 static int gve_set_desc_cnt(struct gve_priv *priv,
 			    struct gve_device_descriptor *descriptor)
 {
-	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
-	if (priv->tx_desc_cnt * sizeof(priv->txqs[0]->tx_desc_ring[0])
+	priv->default_tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+	if (priv->default_tx_desc_cnt * sizeof(priv->txqs[0]->tx_desc_ring[0])
 	    < PAGE_SIZE) {
-		PMD_DRV_LOG(ERR, "Tx desc count %d too low", priv->tx_desc_cnt);
+		PMD_DRV_LOG(ERR, "Tx desc count %d too low",
+			    priv->default_tx_desc_cnt);
 		return -EINVAL;
 	}
-	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
-	if (priv->rx_desc_cnt * sizeof(priv->rxqs[0]->rx_desc_ring[0])
+	priv->default_rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+	if (priv->default_rx_desc_cnt * sizeof(priv->rxqs[0]->rx_desc_ring[0])
 	    < PAGE_SIZE) {
-		PMD_DRV_LOG(ERR, "Rx desc count %d too low", priv->rx_desc_cnt);
+		PMD_DRV_LOG(ERR, "Rx desc count %d too low", priv->default_rx_desc_cnt);
 		return -EINVAL;
 	}
 	return 0;
@@ -706,14 +716,22 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
 		     const struct gve_device_descriptor *descriptor,
 		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
 {
-	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
+	priv->default_tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
 	priv->tx_compq_size = be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
-	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
+	priv->default_rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
 	priv->rx_bufq_size = be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
 
 	return 0;
 }
 
+static void
+gve_set_min_desc_cnt(struct gve_priv *priv,
+	struct gve_device_option_modify_ring *modify_ring)
+{
+	priv->min_rx_desc_cnt = be16_to_cpu(modify_ring->min_ring_size.rx);
+	priv->min_tx_desc_cnt = be16_to_cpu(modify_ring->min_ring_size.tx);
+}
+
 static void
 gve_set_max_desc_cnt(struct gve_priv *priv,
 	const struct gve_device_option_modify_ring *modify_ring)
@@ -725,8 +743,8 @@ gve_set_max_desc_cnt(struct gve_priv *priv,
 		priv->max_tx_desc_cnt = GVE_MAX_QUEUE_SIZE_DQO;
 		return;
 	}
-	priv->max_rx_desc_cnt = modify_ring->max_rx_ring_size;
-	priv->max_tx_desc_cnt = modify_ring->max_tx_ring_size;
+	priv->max_rx_desc_cnt = be16_to_cpu(modify_ring->max_ring_size.rx);
+	priv->max_tx_desc_cnt = be16_to_cpu(modify_ring->max_ring_size.tx);
 }
 
 static void gve_enable_supported_features(struct gve_priv *priv,
@@ -737,6 +755,7 @@ static void gve_enable_supported_features(struct gve_priv *priv,
 	if (dev_op_modify_ring &&
 	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
 		PMD_DRV_LOG(INFO, "MODIFY RING device option enabled.");
+		/* Min ring size set separately by virtue of it being optional. */
 		gve_set_max_desc_cnt(priv, dev_op_modify_ring);
 	}
 
@@ -819,10 +838,6 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	if (err)
 		goto free_device_descriptor;
 
-	/* Default max to current in case modify ring size option is disabled. */
-	priv->max_tx_desc_cnt = priv->tx_desc_cnt;
-	priv->max_rx_desc_cnt = priv->rx_desc_cnt;
-
 	priv->max_registered_pages =
 				be64_to_cpu(descriptor->max_registered_pages);
 	mtu = be16_to_cpu(descriptor->mtu);
diff --git a/drivers/net/gve/base/gve_adminq.h b/drivers/net/gve/base/gve_adminq.h
index ff69f74d69..6a3d4691b5 100644
--- a/drivers/net/gve/base/gve_adminq.h
+++ b/drivers/net/gve/base/gve_adminq.h
@@ -110,13 +110,20 @@ struct gve_device_option_dqo_rda {
 
 GVE_CHECK_STRUCT_LEN(8, gve_device_option_dqo_rda);
 
+struct gve_ring_size_bound {
+	__be16 rx;
+	__be16 tx;
+};
+
+GVE_CHECK_STRUCT_LEN(4, gve_ring_size_bound);
+
 struct gve_device_option_modify_ring {
 	__be32 supported_features_mask;
-	__be16 max_rx_ring_size;
-	__be16 max_tx_ring_size;
+	struct gve_ring_size_bound max_ring_size;
+	struct gve_ring_size_bound min_ring_size;
 };
 
-GVE_CHECK_STRUCT_LEN(8, gve_device_option_modify_ring);
+GVE_CHECK_STRUCT_LEN(12, gve_device_option_modify_ring);
 
 struct gve_device_option_jumbo_frames {
 	__be32 supported_features_mask;
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index ca92277a68..603644735d 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -508,17 +508,21 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.offloads = 0,
 	};
 
-	dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
+	dev_info->default_rxportconf.ring_size = priv->default_rx_desc_cnt;
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
-		.nb_max = gve_is_gqi(priv) ? priv->rx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
-		.nb_min = priv->rx_desc_cnt,
+		.nb_max = gve_is_gqi(priv) ?
+			priv->default_rx_desc_cnt :
+			GVE_MAX_QUEUE_SIZE_DQO,
+		.nb_min = priv->default_rx_desc_cnt,
 		.nb_align = 1,
 	};
 
-	dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
+	dev_info->default_txportconf.ring_size = priv->default_tx_desc_cnt;
 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
-		.nb_max = gve_is_gqi(priv) ? priv->tx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
-		.nb_min = priv->tx_desc_cnt,
+		.nb_max = gve_is_gqi(priv) ?
+			priv->default_tx_desc_cnt :
+			GVE_MAX_QUEUE_SIZE_DQO,
+		.nb_min = priv->default_tx_desc_cnt,
 		.nb_align = 1,
 	};
 
@@ -1088,6 +1092,15 @@ gve_setup_device_resources(struct gve_priv *priv)
 	return err;
 }
 
+static void
+gve_set_default_ring_size_bounds(struct gve_priv *priv)
+{
+	priv->max_tx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE;
+	priv->max_rx_desc_cnt = GVE_DEFAULT_MAX_RING_SIZE;
+	priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+	priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+}
+
 static int
 gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 {
@@ -1106,6 +1119,9 @@ gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
 		goto free_adminq;
 	}
 
+	/* Set default descriptor counts */
+	gve_set_default_ring_size_bounds(priv);
+
 	if (skip_describe_device)
 		goto setup_device;
 
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 393a4362c9..c417a0b31c 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -15,19 +15,23 @@
 /* TODO: this is a workaround to ensure that Tx complq is enough */
 #define DQO_TX_MULTIPLIER 4
 
-#define GVE_DEFAULT_RX_FREE_THRESH   64
-#define GVE_DEFAULT_TX_FREE_THRESH   32
-#define GVE_DEFAULT_TX_RS_THRESH     32
-#define GVE_TX_MAX_FREE_SZ          512
+#define GVE_DEFAULT_MAX_RING_SIZE	1024
+#define GVE_DEFAULT_MIN_RX_RING_SIZE	512
+#define GVE_DEFAULT_MIN_TX_RING_SIZE	256
 
-#define GVE_RX_BUF_ALIGN_DQO        128
-#define GVE_RX_MIN_BUF_SIZE_DQO    1024
-#define GVE_RX_MAX_BUF_SIZE_DQO    ((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)
-#define GVE_MAX_QUEUE_SIZE_DQO     4096
+#define GVE_DEFAULT_RX_FREE_THRESH	64
+#define GVE_DEFAULT_TX_FREE_THRESH	32
+#define GVE_DEFAULT_TX_RS_THRESH	32
+#define GVE_TX_MAX_FREE_SZ		512
 
-#define GVE_RX_BUF_ALIGN_GQI       2048
-#define GVE_RX_MIN_BUF_SIZE_GQI    2048
-#define GVE_RX_MAX_BUF_SIZE_GQI    4096
+#define GVE_RX_BUF_ALIGN_DQO		128
+#define GVE_RX_MIN_BUF_SIZE_DQO		1024
+#define GVE_RX_MAX_BUF_SIZE_DQO		((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)
+#define GVE_MAX_QUEUE_SIZE_DQO		4096
+
+#define GVE_RX_BUF_ALIGN_GQI		2048
+#define GVE_RX_MIN_BUF_SIZE_GQI		2048
+#define GVE_RX_MAX_BUF_SIZE_GQI		4096
 
 #define GVE_RSS_HASH_KEY_SIZE 40
 #define GVE_RSS_INDIR_SIZE 128
@@ -234,10 +238,17 @@ struct gve_priv {
 	const struct rte_memzone *cnt_array_mz;
 
 	uint16_t num_event_counters;
+
+	/* TX ring size default and limits. */
+	uint16_t default_tx_desc_cnt;
 	uint16_t max_tx_desc_cnt;
+	uint16_t min_tx_desc_cnt;
+
+	/* RX ring size default and limits. */
+	uint16_t default_rx_desc_cnt;
 	uint16_t max_rx_desc_cnt;
-	uint16_t tx_desc_cnt; /* txq size */
-	uint16_t rx_desc_cnt; /* rxq size */
+	uint16_t min_rx_desc_cnt;
+
 	uint16_t tx_pages_per_qpl;
 
 	/* Only valid for DQO_RDA queue format */
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index d2c6920406..43cb368be9 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -304,11 +304,11 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint32_t mbuf_len;
 	int err = 0;
 
-	if (nb_desc != hw->rx_desc_cnt) {
+	if (nb_desc != hw->default_rx_desc_cnt) {
 		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
-			    hw->rx_desc_cnt);
+			    hw->default_rx_desc_cnt);
 	}
-	nb_desc = hw->rx_desc_cnt;
+	nb_desc = hw->default_rx_desc_cnt;
 
 	/* Free memory if needed. */
 	if (dev->data->rx_queues[queue_id]) {
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 70d3ef060c..8c255bd0f2 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -559,11 +559,11 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	uint16_t free_thresh;
 	int err = 0;
 
-	if (nb_desc != hw->tx_desc_cnt) {
+	if (nb_desc != hw->default_tx_desc_cnt) {
 		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
-			    hw->tx_desc_cnt);
+			    hw->default_tx_desc_cnt);
 	}
-	nb_desc = hw->tx_desc_cnt;
+	nb_desc = hw->default_tx_desc_cnt;
 
 	/* Free memory if needed. */
 	if (dev->data->tx_queues[queue_id]) {
-- 
2.45.2.803.g4e1b14247a-goog



* [PATCH 4/4] net/gve: add ability to modify ring size in GQ format
  2024-07-17 17:56 [PATCH 0/4] gve GQ ring size modification Joshua Washington
                   ` (2 preceding siblings ...)
  2024-07-17 17:56 ` [PATCH 3/4] net/gve: add min ring size support Joshua Washington
@ 2024-07-17 17:56 ` Joshua Washington
  2024-07-19 18:59 ` [PATCH 0/4] gve GQ ring size modification Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Joshua Washington @ 2024-07-17 17:56 UTC (permalink / raw)
  To: Jeroen de Borst, Rushil Gupta, Joshua Washington
  Cc: dev, Ferruh Yigit, Harshitha Ramamurthy

Before this patch, the GQ format only supported a static ring size
provided by the device. Using the maximum and minimum ring sizes
provided in the modify_ring_size adminq command, this patch enables
changing the ring size within those bounds for the GQ queue format.

Note that the ring size must be a power of two; any other value is
rejected.
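
For illustration, what a DPDK application sees after this patch
(port_id and mb_pool are hypothetical; queue 0 on a GQ device):

  /* Any power-of-two size within the reported [min, max] is accepted: */
  ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                               NULL, mb_pool);      /* succeeds */
  ret = rte_eth_rx_queue_setup(port_id, 0, 600, rte_socket_id(),
                               NULL, mb_pool);      /* -EINVAL: not 2^n */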

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
 drivers/net/gve/base/gve_adminq.c |  6 ++----
 drivers/net/gve/gve_ethdev.c      | 12 ++++--------
 drivers/net/gve/gve_rx.c          | 10 ++++++----
 drivers/net/gve/gve_tx.c          | 10 ++++++----
 4 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c
index 72c05c8237..09c6bff026 100644
--- a/drivers/net/gve/base/gve_adminq.c
+++ b/drivers/net/gve/base/gve_adminq.c
@@ -540,6 +540,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 			cpu_to_be64(txq->qres_mz->iova),
 		.tx_ring_addr = cpu_to_be64(txq->tx_ring_phys_addr),
 		.ntfy_id = cpu_to_be32(txq->ntfy_id),
+		.tx_ring_size = cpu_to_be16(txq->nb_tx_desc),
 	};
 
 	if (gve_is_gqi(priv)) {
@@ -548,8 +549,6 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
 
 		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
 	} else {
-		cmd.create_tx_queue.tx_ring_size =
-			cpu_to_be16(txq->nb_tx_desc);
 		cmd.create_tx_queue.tx_comp_ring_addr =
 			cpu_to_be64(txq->compl_ring_phys_addr);
 		cmd.create_tx_queue.tx_comp_ring_size =
@@ -584,6 +583,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 		.queue_id = cpu_to_be32(queue_index),
 		.ntfy_id = cpu_to_be32(rxq->ntfy_id),
 		.queue_resources_addr = cpu_to_be64(rxq->qres_mz->iova),
+		.rx_ring_size = cpu_to_be16(rxq->nb_rx_desc),
 	};
 
 	if (gve_is_gqi(priv)) {
@@ -598,8 +598,6 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
 		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
 		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rxq->rx_buf_len);
 	} else {
-		cmd.create_rx_queue.rx_ring_size =
-			cpu_to_be16(rxq->nb_rx_desc);
 		cmd.create_rx_queue.rx_desc_ring_addr =
 			cpu_to_be64(rxq->compl_ring_phys_addr);
 		cmd.create_rx_queue.rx_data_ring_addr =
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 603644735d..2a674e748f 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -510,19 +510,15 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->default_rxportconf.ring_size = priv->default_rx_desc_cnt;
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
-		.nb_max = gve_is_gqi(priv) ?
-			priv->default_rx_desc_cnt :
-			GVE_MAX_QUEUE_SIZE_DQO,
-		.nb_min = priv->default_rx_desc_cnt,
+		.nb_max = priv->max_rx_desc_cnt,
+		.nb_min = priv->min_rx_desc_cnt,
 		.nb_align = 1,
 	};
 
 	dev_info->default_txportconf.ring_size = priv->default_tx_desc_cnt;
 	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
-		.nb_max = gve_is_gqi(priv) ?
-			priv->default_tx_desc_cnt :
-			GVE_MAX_QUEUE_SIZE_DQO,
-		.nb_min = priv->default_tx_desc_cnt,
+		.nb_max = priv->max_tx_desc_cnt,
+		.nb_min = priv->min_tx_desc_cnt,
 		.nb_align = 1,
 	};
 
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index 43cb368be9..89b6ef384a 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -304,11 +304,12 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	uint32_t mbuf_len;
 	int err = 0;
 
-	if (nb_desc != hw->default_rx_desc_cnt) {
-		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
-			    hw->default_rx_desc_cnt);
+	/* Ring size is required to be a power of two. */
+	if (!rte_is_power_of_2(nb_desc)) {
+		PMD_DRV_LOG(ERR, "Invalid ring size %u. GVE ring size must be a power of 2.\n",
+			    nb_desc);
+		return -EINVAL;
 	}
-	nb_desc = hw->default_rx_desc_cnt;
 
 	/* Free memory if needed. */
 	if (dev->data->rx_queues[queue_id]) {
@@ -388,6 +389,7 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 		rxq->qpl = gve_setup_queue_page_list(hw, queue_id, true,
 						     nb_desc);
 		if (!rxq->qpl) {
+			err = -ENOMEM;
 			PMD_DRV_LOG(ERR,
 				    "Failed to alloc rx qpl for queue %hu.",
 				    queue_id);
diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c
index 8c255bd0f2..b1ea36e509 100644
--- a/drivers/net/gve/gve_tx.c
+++ b/drivers/net/gve/gve_tx.c
@@ -559,11 +559,12 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 	uint16_t free_thresh;
 	int err = 0;
 
-	if (nb_desc != hw->default_tx_desc_cnt) {
-		PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
-			    hw->default_tx_desc_cnt);
+	/* Ring size is required to be a power of two. */
+	if (!rte_is_power_of_2(nb_desc)) {
+		PMD_DRV_LOG(ERR, "Invalid ring size %u. GVE ring size must be a power of 2.\n",
+			    nb_desc);
+		return -EINVAL;
 	}
-	nb_desc = hw->default_tx_desc_cnt;
 
 	/* Free memory if needed. */
 	if (dev->data->tx_queues[queue_id]) {
@@ -633,6 +634,7 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 		txq->qpl = gve_setup_queue_page_list(hw, queue_id, false,
 						     hw->tx_pages_per_qpl);
 		if (!txq->qpl) {
+			err = -ENOMEM;
 			PMD_DRV_LOG(ERR, "Failed to alloc tx qpl for queue %hu.",
 				    queue_id);
 			goto err_iov_ring;
-- 
2.45.2.803.g4e1b14247a-goog



* Re: [PATCH 0/4] gve GQ ring size modification
  2024-07-17 17:56 [PATCH 0/4] gve GQ ring size modification Joshua Washington
                   ` (3 preceding siblings ...)
  2024-07-17 17:56 ` [PATCH 4/4] net/gve: add ability to modify ring size in GQ format Joshua Washington
@ 2024-07-19 18:59 ` Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Ferruh Yigit @ 2024-07-19 18:59 UTC (permalink / raw)
  To: Joshua Washington; +Cc: dev, Rushil Gupta, Harshitha Ramamurthy

On 7/17/2024 6:56 PM, Joshua Washington wrote:
> This patch series adds the ability to modify the ring size when using
> the GQ queue format for the GVE driver. Before this change, the GQ
> driver supported only 1024 descriptors in a ring. With this change, ring
> sizes can be as low or as high as the device specifies. If the device
> does not specify limits, the minimum ring size is fixed at 512
> descriptors for RX and 256 descriptors for TX, while the maximum ring
> size is fixed at 1024 for both RX and TX.
> 
> Limitations:
>   The ring size must be a power of two.
> 
> The DQ queue format should remain unaffected by this change.
> 
> Joshua Washington (4):
>   net/gve: add ring size device option
>   net/gve: remove explicit field for Rx pages per QPL
>   net/gve: add min ring size support
>   net/gve: add ability to modify ring size in GQ format
>

Series applied to dpdk-next-net/main, thanks.

