DPDK patches and discussions
From: Joshua Washington <joshwash@google.com>
To: Thomas Monjalon <thomas@monjalon.net>,
	Jeroen de Borst <jeroendb@google.com>,
	 Joshua Washington <joshwash@google.com>,
	Junfeng Guo <junfengg@nvidia.com>,
	 Rushil Gupta <rushilg@google.com>
Cc: dev@dpdk.org, junfeng.guo@intel.com, stable@dpdk.org,
	 Ankit Garg <nktgrg@google.com>
Subject: [PATCH 1/7] net/gve: send whole packet when mbuf is large
Date: Mon,  7 Jul 2025 16:18:05 -0700	[thread overview]
Message-ID: <20250707231812.1937260-2-joshwash@google.com>
In-Reply-To: <20250707231812.1937260-1-joshwash@google.com>

Before this patch, only one descriptor would be written per mbuf in a
packet. In cases like TSO, a single mbuf can carry more bytes than
GVE_TX_MAX_BUF_SIZE_DQO, the largest buffer a single DQO Tx descriptor
can reference. Instead of simply truncating the data down to this size,
the driver should write additional descriptors covering the rest of the
data in that mbuf segment.
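
To illustrate the chunking (a minimal standalone sketch, not the
driver code: the 16 KiB value of GVE_TX_MAX_BUF_SIZE_DQO and the
write_desc() helper are assumptions made for this example; the real
descriptor fill is in the diff below):

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  #define GVE_TX_MAX_BUF_SIZE_DQO (16 * 1024) /* illustrative value only */

  /* Stand-in for writing one DQO Tx data descriptor. */
  static void
  write_desc(uint64_t addr, uint16_t len)
  {
          printf("desc: addr=0x%" PRIx64 " len=%u\n", addr, len);
  }

  /* Cover one buffer with as many descriptors as needed, each pointing
   * at a chunk of at most GVE_TX_MAX_BUF_SIZE_DQO bytes, instead of
   * truncating after the first descriptor.
   */
  static void
  fill_descs_for_buf(uint64_t buf_iova, uint32_t data_len)
  {
          uint32_t offset = 0;

          while (offset < data_len) {
                  uint32_t left = data_len - offset;
                  uint16_t chunk = left < GVE_TX_MAX_BUF_SIZE_DQO ?
                          (uint16_t)left : GVE_TX_MAX_BUF_SIZE_DQO;

                  write_desc(buf_iova + offset, chunk);
                  offset += chunk;
          }
  }

  int
  main(void)
  {
          fill_descs_for_buf(0x100000, 40000);
          return 0;
  }

Run, this prints three descriptors of 16384, 16384 and 7232 bytes,
where truncation would have produced a single 16384-byte descriptor.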

To that effect, the number of descriptors needed to send a packet must
be computed over these per-chunk descriptors rather than assuming one
descriptor per mbuf segment.
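
The corrected count is a ceiling division per segment (again a sketch
under the same assumed 16 KiB limit; segment lengths are passed as a
plain array here rather than walked from an mbuf chain):

  #include <stdint.h>

  #define GVE_TX_MAX_BUF_SIZE_DQO (16 * 1024) /* illustrative value only */

  /* Descriptors needed for one packet: one per chunk of at most
   * GVE_TX_MAX_BUF_SIZE_DQO bytes in each segment, plus one context
   * descriptor when the packet uses TSO.
   */
  static uint16_t
  pkt_nb_descs(const uint32_t *seg_lens, int nb_segs, int tso)
  {
          uint16_t nb = tso ? 1 : 0;
          int i;

          for (i = 0; i < nb_segs; i++)
                  nb += (seg_lens[i] + GVE_TX_MAX_BUF_SIZE_DQO - 1) /
                          GVE_TX_MAX_BUF_SIZE_DQO;
          return nb;
  }

For a single 60000-byte TSO segment this reserves 4 + 1 = 5
descriptors, where an nb_segs-based count would have reserved only
1 + 1 = 2, fewer than are actually written.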

Fixes: 4022f9999f56 ("net/gve: support basic Tx data path for DQO")
Cc: junfeng.guo@intel.com
Cc: stable@dpdk.org

Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Ankit Garg <nktgrg@google.com>
---
 .mailmap                     |  1 +
 drivers/net/gve/gve_tx_dqo.c | 54 ++++++++++++++++++++++++------------
 2 files changed, 38 insertions(+), 17 deletions(-)

diff --git a/.mailmap b/.mailmap
index 1ea4f9446d..758878bd8b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -124,6 +124,7 @@ Andy Green <andy@warmcat.com>
 Andy Moreton <andy.moreton@amd.com> <amoreton@xilinx.com> <amoreton@solarflare.com>
 Andy Pei <andy.pei@intel.com>
 Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
+Ankit Garg <nktgrg@google.com>
 Ankur Dwivedi <adwivedi@marvell.com> <ankur.dwivedi@caviumnetworks.com> <ankur.dwivedi@cavium.com>
 Anna Lukin <annal@silicom.co.il>
 Anoob Joseph <anoobj@marvell.com> <anoob.joseph@caviumnetworks.com>
diff --git a/drivers/net/gve/gve_tx_dqo.c b/drivers/net/gve/gve_tx_dqo.c
index 6984f92443..6227fa73b0 100644
--- a/drivers/net/gve/gve_tx_dqo.c
+++ b/drivers/net/gve/gve_tx_dqo.c
@@ -74,6 +74,19 @@ gve_tx_clean_dqo(struct gve_tx_queue *txq)
 	txq->complq_tail = next;
 }
 
+static uint16_t
+gve_tx_pkt_nb_data_descs(struct rte_mbuf *tx_pkt)
+{
+	uint16_t nb_descs = 0;
+
+	while (tx_pkt) {
+		nb_descs += (GVE_TX_MAX_BUF_SIZE_DQO - 1 + tx_pkt->data_len) /
+			GVE_TX_MAX_BUF_SIZE_DQO;
+		tx_pkt = tx_pkt->next;
+	}
+	return nb_descs;
+}
+
 static inline void
 gve_tx_fill_seg_desc_dqo(volatile union gve_tx_desc_dqo *desc, struct rte_mbuf *tx_pkt)
 {
@@ -97,7 +110,7 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t nb_to_clean;
 	uint16_t nb_tx = 0;
 	uint64_t ol_flags;
-	uint16_t nb_used;
+	uint16_t nb_descs;
 	uint16_t tx_id;
 	uint16_t sw_id;
 	uint64_t bytes;
@@ -124,14 +137,14 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		}
 
 		ol_flags = tx_pkt->ol_flags;
-		nb_used = tx_pkt->nb_segs;
 		first_sw_id = sw_id;
 
 		tso = !!(ol_flags & RTE_MBUF_F_TX_TCP_SEG);
 		csum = !!(ol_flags & GVE_TX_CKSUM_OFFLOAD_MASK_DQO);
 
-		nb_used += tso;
-		if (txq->nb_free < nb_used)
+		nb_descs = gve_tx_pkt_nb_data_descs(tx_pkt);
+		nb_descs += tso;
+		if (txq->nb_free < nb_descs)
 			break;
 
 		if (tso) {
@@ -144,21 +157,28 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			if (sw_ring[sw_id] != NULL)
 				PMD_DRV_LOG(DEBUG, "Overwriting an entry in sw_ring");
 
-			txd = &txr[tx_id];
 			sw_ring[sw_id] = tx_pkt;
 
-			/* fill Tx descriptor */
-			txd->pkt.buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_pkt));
-			txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
-			txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id);
-			txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len, GVE_TX_MAX_BUF_SIZE_DQO);
-			txd->pkt.end_of_packet = 0;
-			txd->pkt.checksum_offload_enable = csum;
+			/* fill Tx descriptors */
+			int mbuf_offset = 0;
+			while (mbuf_offset < tx_pkt->data_len) {
+				uint64_t buf_addr = rte_mbuf_data_iova(tx_pkt) +
+					mbuf_offset;
+
+				txd = &txr[tx_id];
+				txd->pkt.buf_addr = rte_cpu_to_le_64(buf_addr);
+				txd->pkt.compl_tag = rte_cpu_to_le_16(first_sw_id);
+				txd->pkt.dtype = GVE_TX_PKT_DESC_DTYPE_DQO;
+				txd->pkt.buf_size = RTE_MIN(tx_pkt->data_len - mbuf_offset,
+							    GVE_TX_MAX_BUF_SIZE_DQO);
+				txd->pkt.end_of_packet = 0;
+				txd->pkt.checksum_offload_enable = csum;
+
+				mbuf_offset += txd->pkt.buf_size;
+				tx_id = (tx_id + 1) & mask;
+			}
 
-			/* size of desc_ring and sw_ring could be different */
-			tx_id = (tx_id + 1) & mask;
 			sw_id = (sw_id + 1) & sw_mask;
-
 			bytes += tx_pkt->data_len;
 			tx_pkt = tx_pkt->next;
 		} while (tx_pkt);
@@ -166,8 +186,8 @@ gve_tx_burst_dqo(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		/* fill the last descriptor with End of Packet (EOP) bit */
 		txd->pkt.end_of_packet = 1;
 
-		txq->nb_free -= nb_used;
-		txq->nb_used += nb_used;
+		txq->nb_free -= nb_descs;
+		txq->nb_used += nb_descs;
 	}
 
 	/* update the tail pointer if any packets were processed */
-- 
2.50.0.727.gbf7dc18ff4-goog



Thread overview: 8 messages
2025-07-07 23:18 [PATCH 0/7] net/gve: Tx datapath fixes for GVE DQO Joshua Washington
2025-07-07 23:18 ` Joshua Washington [this message]
2025-07-07 23:18 ` [PATCH 2/7] net/gve: clean when there are insufficient Tx descs Joshua Washington
2025-07-07 23:18 ` [PATCH 3/7] net/gve: don't write zero-length descriptors Joshua Washington
2025-07-07 23:18 ` [PATCH 4/7] net/gve: validate Tx packet before sending Joshua Washington
2025-07-07 23:18 ` [PATCH 5/7] net/gve: add DQO Tx descriptor limit Joshua Washington
2025-07-07 23:18 ` [PATCH 6/7] net/gve: fix DQO TSO " Joshua Washington
2025-07-07 23:18 ` [PATCH 7/7] net/gve: clear DQO Tx descriptors before writing Joshua Washington
