* [PATCH] net/gve: add support for 4K ring size only for DQO
@ 2023-11-20 8:04 Rushil Gupta
2023-11-20 16:59 ` Ferruh Yigit
0 siblings, 1 reply; 2+ messages in thread
From: Rushil Gupta @ 2023-11-20 8:04 UTC (permalink / raw)
To: junfeng.guo, jeroendb, joshwash, ferruh.yigit; +Cc: dev, Rushil Gupta
Bump up the DPDK DQO driver ring size to 4096.
The queue size is controlled by the queue_setup method.
Signed-off-by: Rushil Gupta <rushilg@google.com>
Reviewed-by: Joshua Washington <joshwash@google.com>
---
drivers/net/gve/base/gve_adminq.c | 8 ++++----
drivers/net/gve/gve_ethdev.c | 4 ++--
drivers/net/gve/gve_ethdev.h | 1 +
drivers/net/gve/gve_rx_dqo.c | 6 ------
drivers/net/gve/gve_tx_dqo.c | 6 ------
5 files changed, 7 insertions(+), 18 deletions(-)
diff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c
index 41202725e6..343bd13d67 100644
--- a/drivers/net/gve/base/gve_adminq.c
+++ b/drivers/net/gve/base/gve_adminq.c
@@ -516,11 +516,11 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
cmd.create_tx_queue.tx_ring_size =
- cpu_to_be16(priv->tx_desc_cnt);
+ cpu_to_be16(txq->nb_tx_desc);
cmd.create_tx_queue.tx_comp_ring_addr =
cpu_to_be64(txq->compl_ring_phys_addr);
cmd.create_tx_queue.tx_comp_ring_size =
- cpu_to_be16(priv->tx_compq_size * DQO_TX_MULTIPLIER);
+ cpu_to_be16(txq->sw_size);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -566,7 +566,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rxq->rx_buf_len);
} else {
cmd.create_rx_queue.rx_ring_size =
- cpu_to_be16(priv->rx_desc_cnt);
+ cpu_to_be16(rxq->nb_rx_desc);
cmd.create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rxq->compl_ring_phys_addr);
cmd.create_rx_queue.rx_data_ring_addr =
@@ -574,7 +574,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_rx_queue.packet_buffer_size =
cpu_to_be16(rxq->rx_buf_len);
cmd.create_rx_queue.rx_buff_ring_size =
- cpu_to_be16(priv->rx_bufq_size);
+ cpu_to_be16(rxq->nb_rx_desc);
cmd.create_rx_queue.enable_rsc = !!(priv->enable_rsc);
}
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 001cae2b98..ecd37ff37f 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -333,14 +333,14 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_rxportconf.ring_size = priv->rx_desc_cnt;
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
- .nb_max = priv->rx_desc_cnt,
+ .nb_max = gve_is_gqi(priv) ? priv->rx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
.nb_min = priv->rx_desc_cnt,
.nb_align = 1,
};
dev_info->default_txportconf.ring_size = priv->tx_desc_cnt;
dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
- .nb_max = priv->tx_desc_cnt,
+ .nb_max = gve_is_gqi(priv) ? priv->tx_desc_cnt : GVE_MAX_QUEUE_SIZE_DQO,
.nb_min = priv->tx_desc_cnt,
.nb_align = 1,
};
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 37f2b60845..58d8943e71 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -23,6 +23,7 @@
#define GVE_RX_BUF_ALIGN_DQO 128
#define GVE_RX_MIN_BUF_SIZE_DQO 1024
#define GVE_RX_MAX_BUF_SIZE_DQO ((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)
+#define GVE_MAX_QUEUE_SIZE_DQO 4096
#define GVE_RX_BUF_ALIGN_GQI 2048
#define GVE_RX_MIN_BUF_SIZE_GQI 2048
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index 422784e7e0..7c7a8c48d0 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -223,12 +223,6 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
uint32_t mbuf_len;
int err = 0;
- if (nb_desc != hw->rx_desc_cnt) {
- PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
- hw->rx_desc_cnt);
- }
- nb_desc = hw->rx_desc_cnt;
-
/* Free memory if needed */
if (dev->data->rx_queues[queue_id]) {
gve_rx_queue_release_dqo(dev, queue_id);
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index e0d144835b..16101de84f 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -268,12 +268,6 @@ gve_tx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t sw_size;
int err = 0;
- if (nb_desc != hw->tx_desc_cnt) {
- PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.",
- hw->tx_desc_cnt);
- }
- nb_desc = hw->tx_desc_cnt;
-
/* Free memory if needed. */
if (dev->data->tx_queues[queue_id]) {
gve_tx_queue_release_dqo(dev, queue_id);
--
2.43.0.rc1.413.gea7ed67945-goog
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2023-11-20 17:00 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-11-20 8:04 [PATCH] net/gve: add support for 4K ring size only for DQO Rushil Gupta
2023-11-20 16:59 ` Ferruh Yigit
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).