DPDK patches and discussions
* [PATCH] net/gve: fix RX buffer size alignment
@ 2023-11-11  0:34 Joshua Washington
  2023-11-11  4:18 ` Ferruh Yigit
  2023-11-13 23:12 ` [PATCH v2] " Joshua Washington
  0 siblings, 2 replies; 6+ messages in thread
From: Joshua Washington @ 2023-11-11  0:34 UTC
  To: Junfeng Guo, Jeroen de Borst, Rushil Gupta, Joshua Washington,
	Xiaoyun Li
  Cc: dev, stable, Ferruh Yigit

In GVE, both queue formats have RX buffer size alignment requirements
that are not respected whenever the mbuf size exceeds the minimum
required by DPDK (a 2048-byte data room plus 128 bytes of headroom).
When that happens, the driver fails silently during initialization:
no queues are created, so no network traffic passes.

This change remedies the problem by clamping the RX buffer size to a
valid, properly aligned size for the queue format in use.
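
As a minimal, self-contained sketch (not part of the patch itself),
the clamping applied by the gve_rx.c and gve_rx_dqo.c hunks below
looks like this; the standalone helpers are hypothetical, since the
driver performs the computation inline during Rx queue setup. With a
3000-byte data room, for instance, the usable 2872 bytes are floored
to 2048 for GQI and to 2816 for DQO, and the DQO ceiling evaluates to
16 * 1024 - 128 = 16256 bytes.

    #include <stdint.h>
    #include <rte_common.h> /* RTE_MIN, RTE_ALIGN_FLOOR */

    /* Macro values as introduced in gve_ethdev.h by this patch. */
    #define GVE_RX_BUF_ALIGN_GQI       2048
    #define GVE_RX_MAX_BUF_SIZE_GQI    4096
    #define GVE_RX_BUF_ALIGN_DQO        128
    #define GVE_RX_MAX_BUF_SIZE_DQO    ((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)

    /* Hypothetical helpers mirroring the inline logic below: round the
     * usable mbuf length down to the queue format's alignment, then cap
     * it at the format's maximum buffer size. */
    static inline uint16_t
    gve_rx_buf_len_gqi(uint32_t mbuf_len)
    {
    	return RTE_MIN((uint16_t)GVE_RX_MAX_BUF_SIZE_GQI,
    		       RTE_ALIGN_FLOOR(mbuf_len, GVE_RX_BUF_ALIGN_GQI));
    }

    static inline uint16_t
    gve_rx_buf_len_dqo(uint32_t mbuf_len)
    {
    	return RTE_MIN((uint16_t)GVE_RX_MAX_BUF_SIZE_DQO,
    		       RTE_ALIGN_FLOOR(mbuf_len, GVE_RX_BUF_ALIGN_DQO));
    }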

Fixes: 4bec2d0b5572 ("net/gve: support queue operations")
Fixes: 1dc00f4fc74b ("net/gve: add Rx queue setup for DQO")
Cc: junfeng.guo@intel.com
Cc: stable@dpdk.org

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Rushil Gupta <rushilg@google.com>
---
 drivers/net/gve/gve_ethdev.c |  5 ++++-
 drivers/net/gve/gve_ethdev.h | 22 +++++++++++++++++++++-
 drivers/net/gve/gve_rx.c     | 10 +++++++++-
 drivers/net/gve/gve_rx_dqo.c |  9 ++++++++-
 4 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index eb3bc7e151..43b4ab523d 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -296,7 +296,10 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_mac_addrs = 1;
 	dev_info->max_rx_queues = priv->max_nb_rxq;
 	dev_info->max_tx_queues = priv->max_nb_txq;
-	dev_info->min_rx_bufsize = GVE_MIN_BUF_SIZE;
+	if (gve_is_gqi(priv))
+		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_GQI;
+	else
+		dev_info->min_rx_bufsize = GVE_RX_MIN_BUF_SIZE_DQO;
 	dev_info->max_rx_pktlen = priv->max_mtu + RTE_ETHER_HDR_LEN;
 	dev_info->max_mtu = priv->max_mtu;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 755ee8ad15..0cc3b176f9 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -20,7 +20,13 @@
 #define GVE_DEFAULT_TX_RS_THRESH     32
 #define GVE_TX_MAX_FREE_SZ          512
 
-#define GVE_MIN_BUF_SIZE	    1024
+#define GVE_RX_BUF_ALIGN_DQO        128
+#define GVE_RX_MIN_BUF_SIZE_DQO    1024
+#define GVE_RX_MAX_BUF_SIZE_DQO    ((16 * 1024) - GVE_RX_BUF_ALIGN_DQO)
+
+#define GVE_RX_BUF_ALIGN_GQI       2048
+#define GVE_RX_MIN_BUF_SIZE_GQI    2048
+#define GVE_RX_MAX_BUF_SIZE_GQI    4096
 
 #define GVE_TX_CKSUM_OFFLOAD_MASK (		\
 		RTE_MBUF_F_TX_L4_MASK  |	\
@@ -337,6 +343,20 @@ gve_clear_device_rings_ok(struct gve_priv *priv)
 				&priv->state_flags);
 }
 
+static inline int
+gve_validate_rx_buffer_size(struct gve_priv *priv, uint16_t rx_buffer_size)
+{
+	uint16_t min_rx_buffer_size = gve_is_gqi(priv) ?
+		GVE_RX_MIN_BUF_SIZE_GQI : GVE_RX_MIN_BUF_SIZE_DQO;
+	if (rx_buffer_size < min_rx_buffer_size) {
+		PMD_DRV_LOG(ERR, "mbuf size must be at least %hu bytes",
+			    min_rx_buffer_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int
 gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc,
 		   unsigned int socket_id, const struct rte_eth_rxconf *conf,
diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c
index b8c92ccda0..0049c6428d 100644
--- a/drivers/net/gve/gve_rx.c
+++ b/drivers/net/gve/gve_rx.c
@@ -301,6 +301,7 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	const struct rte_memzone *mz;
 	struct gve_rx_queue *rxq;
 	uint16_t free_thresh;
+	uint32_t mbuf_len;
 	int err = 0;
 
 	if (nb_desc != hw->rx_desc_cnt) {
@@ -344,7 +345,14 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
 	rxq->hw = hw;
 	rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];
 
-	rxq->rx_buf_len = rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
+	mbuf_len =
+		rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
+	err = gve_validate_rx_buffer_size(hw, mbuf_len);
+	if (err)
+		goto err_rxq;
+	rxq->rx_buf_len =
+		RTE_MIN((uint16_t)GVE_RX_MAX_BUF_SIZE_GQI,
+			RTE_ALIGN_FLOOR(mbuf_len, GVE_RX_BUF_ALIGN_GQI));
 
 	/* Allocate software ring */
 	rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring", sizeof(struct rte_mbuf *) * nb_desc,
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index 7e7ddac48e..2ec6135705 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -220,6 +220,7 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	const struct rte_memzone *mz;
 	struct gve_rx_queue *rxq;
 	uint16_t free_thresh;
+	uint32_t mbuf_len;
 	int err = 0;
 
 	if (nb_desc != hw->rx_desc_cnt) {
@@ -264,8 +265,14 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
 	rxq->hw = hw;
 	rxq->ntfy_addr = &hw->db_bar2[rte_be_to_cpu_32(hw->irq_dbs[rxq->ntfy_id].id)];
 
-	rxq->rx_buf_len =
+	mbuf_len =
 		rte_pktmbuf_data_room_size(rxq->mpool) - RTE_PKTMBUF_HEADROOM;
+	err = gve_validate_rx_buffer_size(hw, mbuf_len);
+	if (err)
+		goto free_rxq;
+	rxq->rx_buf_len =
+		RTE_MIN((uint16_t)GVE_RX_MAX_BUF_SIZE_DQO,
+			RTE_ALIGN_FLOOR(mbuf_len, GVE_RX_BUF_ALIGN_DQO));
 
 	/* Allocate software ring */
 	rxq->sw_ring = rte_zmalloc_socket("gve rx sw ring",
-- 
2.42.0.869.gea05f2083d-goog


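For applications that hit the new -EINVAL at Rx queue setup, sizing
the mempool data room so that at least 2048 bytes remain after
RTE_PKTMBUF_HEADROOM satisfies both queue formats (DQO alone needs
only 1024). A minimal sketch, with an illustrative pool name and
counts; a 2048-byte data room plus headroom is exactly DPDK's
RTE_MBUF_DEFAULT_BUF_SIZE:

    #include <rte_lcore.h>
    #include <rte_mbuf.h>

    /* Illustrative helper, not driver code. */
    static struct rte_mempool *
    create_gve_rx_pool(void)
    {
    	/* 2048-byte data room plus headroom: after the driver
    	 * subtracts RTE_PKTMBUF_HEADROOM, the Rx buffer meets the
    	 * 2048-byte GQI minimum (and DQO's 1024-byte minimum). */
    	return rte_pktmbuf_pool_create("gve_rx_pool", 8192, 256, 0,
    				       2048 + RTE_PKTMBUF_HEADROOM,
    				       rte_socket_id());
    }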
