From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>
Subject: [RFC PATCH 13/27] net/i40e: use common scalar Tx function
Date: Fri, 19 Dec 2025 17:25:30 +0000
Message-ID: <20251219172548.2660777-14-bruce.richardson@intel.com>
In-Reply-To: <20251219172548.2660777-1-bruce.richardson@intel.com>
Following the earlier rework, the scalar transmit function for i40e can
use the common function previously moved over from the ice driver. This
removes over two hundred lines of duplicated code.
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
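A note on the pattern, for reviewers: the common scalar Tx core is
parameterized by per-driver hooks, so each driver's burst function
reduces to a thin wrapper around it. The standalone sketch below is a
minimal analogue of that design, not the actual driver code; every name
in it (pkt, ctx_desc_fn, ts_hooks, common_xmit, drv_context_desc,
drv_xmit) is a hypothetical stand-in for ci_xmit_pkts, the
get_context_desc callback and the ts_fns hook set used in the diff.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for an mbuf; only the offload flags matter here. */
struct pkt { uint64_t ol_flags; };

/* Per-driver hook: decide how many context descriptors a packet needs
 * and fill in their two quadwords. Each driver supplies its own
 * implementation (get_context_desc in the real code). */
typedef uint16_t (*ctx_desc_fn)(const struct pkt *p,
				uint64_t *qw0, uint64_t *qw1);

/* Optional hook set, e.g. for timestamp queues. Drivers without the
 * feature pass NULL, as i40e does for ts_fns in this patch. */
struct ts_hooks {
	void (*stamp)(struct pkt *p);
};

/* Common scalar Tx core shared across drivers. Descriptor-ring writes
 * are elided; only the hook dispatch is shown. */
static uint16_t
common_xmit(struct pkt **pkts, uint16_t nb,
	    ctx_desc_fn get_ctx, const struct ts_hooks *ts)
{
	uint16_t i;

	for (i = 0; i < nb; i++) {
		uint64_t qw0 = 0, qw1 = 0;
		uint16_t nb_ctx = get_ctx(pkts[i], &qw0, &qw1);

		if (ts != NULL && ts->stamp != NULL)
			ts->stamp(pkts[i]);
		(void)nb_ctx; /* would drive context-descriptor writes */
	}
	return i;
}

/* Driver-specific policy: pretend packets with bit 0 set in ol_flags
 * need one context descriptor. */
static uint16_t
drv_context_desc(const struct pkt *p, uint64_t *qw0, uint64_t *qw1)
{
	if (p->ol_flags & 0x1) {
		*qw0 = 1;
		*qw1 = 2;
		return 1;
	}
	return 0;
}

/* The per-driver burst function shrinks to a one-line wrapper, exactly
 * the shape i40e_xmit_pkts() takes in the diff below. */
static uint16_t
drv_xmit(struct pkt **pkts, uint16_t nb)
{
	return common_xmit(pkts, nb, drv_context_desc, NULL);
}

int
main(void)
{
	struct pkt a = { .ol_flags = 1 }, b = { .ol_flags = 0 };
	struct pkt *burst[] = { &a, &b };

	printf("sent %u packets\n", (unsigned)drv_xmit(burst, 2));
	return 0;
}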
drivers/net/intel/i40e/i40e_rxtx.c | 206 +----------------------------
1 file changed, 2 insertions(+), 204 deletions(-)
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index 5d1b2e4217..ecec70e0ac 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -1015,210 +1015,8 @@ get_context_desc(uint64_t ol_flags, const struct rte_mbuf *tx_pkt,
uint16_t
i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
- struct ci_tx_queue *txq;
- struct ci_tx_entry *sw_ring;
- struct ci_tx_entry *txe, *txn;
- volatile struct ci_tx_desc *txd;
- volatile struct ci_tx_desc *txr;
- struct rte_mbuf *tx_pkt;
- struct rte_mbuf *m_seg;
- uint16_t tx_id;
- uint16_t nb_tx;
- uint32_t td_cmd;
- uint32_t td_offset;
- uint32_t td_tag;
- uint64_t ol_flags;
- uint16_t nb_used;
- uint16_t nb_ctx;
- uint16_t tx_last;
- uint16_t slen;
- uint64_t buf_dma_addr;
- union ci_tx_offload tx_offload = {0};
-
- txq = tx_queue;
- sw_ring = txq->sw_ring;
- txr = txq->ci_tx_ring;
- tx_id = txq->tx_tail;
- txe = &sw_ring[tx_id];
-
- /* Check if the descriptor ring needs to be cleaned. */
- if (txq->nb_tx_free < txq->tx_free_thresh)
- (void)ci_tx_xmit_cleanup(txq);
-
- for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
- td_cmd = 0;
- td_tag = 0;
- td_offset = 0;
-
- tx_pkt = *tx_pkts++;
- RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
-
- ol_flags = tx_pkt->ol_flags;
- tx_offload.l2_len = tx_pkt->l2_len;
- tx_offload.l3_len = tx_pkt->l3_len;
- tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
- tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
- tx_offload.l4_len = tx_pkt->l4_len;
- tx_offload.tso_segsz = tx_pkt->tso_segsz;
-
- /* Calculate the number of context descriptors needed. */
- uint64_t cd_qw0 = 0, cd_qw1 = 0;
- nb_ctx = get_context_desc(ol_flags, tx_pkt, &tx_offload, txq, &td_offset,
- &cd_qw0, &cd_qw1);
-
- /**
- * The number of descriptors that must be allocated for
- * a packet equals to the number of the segments of that
- * packet plus 1 context descriptor if needed.
- * Recalculate the needed tx descs when TSO enabled in case
- * the mbuf data size exceeds max data size that hw allows
- * per tx desc.
- */
- if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
- nb_used = (uint16_t)(ci_calc_pkt_desc(tx_pkt) + nb_ctx);
- else
- nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
- tx_last = (uint16_t)(tx_id + nb_used - 1);
-
- /* Circular ring */
- if (tx_last >= txq->nb_tx_desc)
- tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
-
- if (nb_used > txq->nb_tx_free) {
- if (ci_tx_xmit_cleanup(txq) != 0) {
- if (nb_tx == 0)
- return 0;
- goto end_of_tx;
- }
- if (unlikely(nb_used > txq->tx_rs_thresh)) {
- while (nb_used > txq->nb_tx_free) {
- if (ci_tx_xmit_cleanup(txq) != 0) {
- if (nb_tx == 0)
- return 0;
- goto end_of_tx;
- }
- }
- }
- }
-
- /* Descriptor based VLAN insertion */
- if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
- td_cmd |= CI_TX_DESC_CMD_IL2TAG1;
- td_tag = tx_pkt->vlan_tci;
- }
-
- /* Always enable CRC offload insertion */
- td_cmd |= CI_TX_DESC_CMD_ICRC;
-
- /* Enable checksum offloading */
- if (ol_flags & CI_TX_CKSUM_OFFLOAD_MASK)
- ci_txd_enable_checksum(ol_flags, &td_cmd,
- &td_offset, tx_offload);
-
- if (nb_ctx) {
- /* Setup TX context descriptor if required */
- uint64_t *desc = RTE_CAST_PTR(uint64_t *, &txr[tx_id]);
-
- txn = &sw_ring[txe->next_id];
- RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
- if (txe->mbuf != NULL) {
- rte_pktmbuf_free_seg(txe->mbuf);
- txe->mbuf = NULL;
- }
-
- desc[0] = cd_qw0;
- desc[1] = cd_qw1;
-
- PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]: "
- "qw0: %#"PRIx64"; "
- "qw1: %#"PRIx64";",
- tx_pkt, tx_id, cd_qw0, cd_qw1);
-
- txe->last_id = tx_last;
- tx_id = txe->next_id;
- txe = txn;
- }
-
- m_seg = tx_pkt;
- do {
- txd = &txr[tx_id];
- txn = &sw_ring[txe->next_id];
-
- if (txe->mbuf)
- rte_pktmbuf_free_seg(txe->mbuf);
- txe->mbuf = m_seg;
-
- /* Setup TX Descriptor */
- slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_iova(m_seg);
-
- while ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
- unlikely(slen > CI_MAX_DATA_PER_TXD)) {
- txd->buffer_addr =
- rte_cpu_to_le_64(buf_dma_addr);
- txd->cmd_type_offset_bsz =
- i40e_build_ctob(td_cmd,
- td_offset, CI_MAX_DATA_PER_TXD,
- td_tag);
-
- buf_dma_addr += CI_MAX_DATA_PER_TXD;
- slen -= CI_MAX_DATA_PER_TXD;
-
- txe->last_id = tx_last;
- tx_id = txe->next_id;
- txe = txn;
- txd = &txr[tx_id];
- txn = &sw_ring[txe->next_id];
- }
- PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]: "
- "buf_dma_addr: %#"PRIx64"; "
- "td_cmd: %#x; "
- "td_offset: %#x; "
- "td_len: %u; "
- "td_tag: %#x;",
- tx_pkt, tx_id, buf_dma_addr,
- td_cmd, td_offset, slen, td_tag);
-
- txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
- txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
- td_offset, slen, td_tag);
- txe->last_id = tx_last;
- tx_id = txe->next_id;
- txe = txn;
- m_seg = m_seg->next;
- } while (m_seg != NULL);
-
- /* The last packet data descriptor needs End Of Packet (EOP) */
- td_cmd |= CI_TX_DESC_CMD_EOP;
- txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
-
- if (txq->nb_tx_used >= txq->tx_rs_thresh) {
- PMD_TX_LOG(DEBUG,
- "Setting RS bit on TXD id="
- "%4u (port=%d queue=%d)",
- tx_last, txq->port_id, txq->queue_id);
-
- td_cmd |= CI_TX_DESC_CMD_RS;
-
- /* Update txq RS bit counters */
- txq->nb_tx_used = 0;
- }
-
- txd->cmd_type_offset_bsz |=
- rte_cpu_to_le_64(((uint64_t)td_cmd) << CI_TXD_QW1_CMD_S);
- }
-
-end_of_tx:
- PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
- (unsigned) txq->port_id, (unsigned) txq->queue_id,
- (unsigned) tx_id, (unsigned) nb_tx);
-
- rte_io_wmb();
- I40E_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
- txq->tx_tail = tx_id;
-
- return nb_tx;
+ /* i40e does not support timestamp queues, so pass NULL for ts_fns */
+ return ci_xmit_pkts(tx_queue, tx_pkts, nb_pkts, get_context_desc, NULL);
}
static __rte_always_inline int
--
2.51.0
Thread overview: 30+ messages
2025-12-19 17:25 [RFC PATCH 00/27] combine multiple Intel scalar Tx paths Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 01/27] net/intel: create common Tx descriptor structure Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 02/27] net/intel: use common tx ring structure Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 03/27] net/intel: create common post-Tx cleanup function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 04/27] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 05/27] net/intel: create separate header for Tx scalar fns Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 06/27] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 07/27] net/ice: refactor context descriptor handling Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 08/27] net/i40e: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 09/27] net/idpf: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 10/27] net/intel: consolidate checksum mask definition Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 11/27] net/intel: create common checksum Tx offload function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 12/27] net/intel: create a common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` Bruce Richardson [this message]
2025-12-19 17:25 ` [RFC PATCH 14/27] net/intel: add IPSec hooks to common " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 15/27] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 16/27] net/iavf: use common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 17/27] net/i40e: document requirement for QinQ support Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 18/27] net/idpf: use common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 19/27] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 20/27] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2025-12-20 8:43 ` Morten Brørup
2025-12-19 17:25 ` [RFC PATCH 21/27] net/intel: remove unnecessary flag clearing Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 22/27] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 23/27] net/intel: add special handling for single desc packets Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 24/27] net/intel: use separate array for desc status tracking Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 25/27] net/ixgbe: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 26/27] net/intel: drop unused Tx queue used count Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 27/27] net/intel: remove index for tracking end of packet Bruce Richardson
2025-12-20 9:05 ` Morten Brørup