DPDK patches and discussions
From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson <bruce.richardson@intel.com>,
	Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Subject: [RFC PATCH 16/27] net/iavf: use common scalar Tx function
Date: Fri, 19 Dec 2025 17:25:33 +0000	[thread overview]
Message-ID: <20251219172548.2660777-17-bruce.richardson@intel.com> (raw)
In-Reply-To: <20251219172548.2660777-1-bruce.richardson@intel.com>

Now that the common scalar Tx function has all necessary hooks for the
features supported by the iavf driver, use the common function to avoid
duplicated code.
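
As a quick orientation for reviewers, the net shape of the Tx entry point
after this patch is roughly as follows (condensed from the final hunk of
the diff below; the ci_xmit_pkts() hooks, struct ci_ipsec_ops and the
CI_VLAN_* values are introduced by earlier patches in this series):

  uint16_t
  iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
  {
  	struct ci_tx_queue *txq = tx_queue;

  	/* driver-specific IPsec hooks for the common scalar Tx path */
  	const struct ci_ipsec_ops ipsec_ops = {
  		.get_ipsec_desc = iavf_get_ipsec_desc,
  		.calc_segment_len = iavf_calc_ipsec_segment_len,
  	};

  	/* the common function takes over ring cleanup, descriptor fill and
  	 * the tail update; iavf only supplies its context-descriptor and
  	 * IPsec callbacks. No timestamp queues on iavf, so ts_fns is NULL.
  	 */
  	return ci_xmit_pkts(txq, tx_pkts, nb_pkts,
  			    (txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) ?
  				CI_VLAN_IN_L2TAG1 : CI_VLAN_IN_L2TAG2,
  			    iavf_get_context_desc, &ipsec_ops, NULL);
  }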

Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/intel/iavf/iavf_rxtx.c | 534 ++++++-----------------------
 1 file changed, 109 insertions(+), 425 deletions(-)

diff --git a/drivers/net/intel/iavf/iavf_rxtx.c b/drivers/net/intel/iavf/iavf_rxtx.c
index 9ce978e69c..f96876ca46 100644
--- a/drivers/net/intel/iavf/iavf_rxtx.c
+++ b/drivers/net/intel/iavf/iavf_rxtx.c
@@ -2327,7 +2327,7 @@ iavf_recv_pkts_bulk_alloc(void *rx_queue,
 
 /* Check if the context descriptor is needed for TX offloading */
 static inline uint16_t
-iavf_calc_context_desc(struct rte_mbuf *mb, uint8_t vlan_flag)
+iavf_calc_context_desc(const struct rte_mbuf *mb, uint8_t vlan_flag)
 {
 	uint64_t flags = mb->ol_flags;
 	if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
@@ -2345,44 +2345,7 @@ iavf_calc_context_desc(struct rte_mbuf *mb, uint8_t vlan_flag)
 }
 
 static inline void
-iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m,
-		uint8_t vlan_flag)
-{
-	uint64_t cmd = 0;
-
-	/* TSO enabled */
-	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
-		cmd = CI_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
-
-	if ((m->ol_flags & RTE_MBUF_F_TX_VLAN &&
-			vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) ||
-			m->ol_flags & RTE_MBUF_F_TX_QINQ) {
-		cmd |= CI_TX_CTX_DESC_IL2TAG2
-			<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-	}
-
-	if (IAVF_CHECK_TX_LLDP(m))
-		cmd |= IAVF_TX_CTX_DESC_SWTCH_UPLINK
-			<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-
-	*field |= cmd;
-}
-
-static inline void
-iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
-	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
-{
-	uint64_t ipsec_field =
-		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
-			IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
-
-	*field |= ipsec_field;
-}
-
-
-static inline void
-iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
-		const struct rte_mbuf *m)
+iavf_fill_ctx_desc_tunnelling_field(uint64_t *qw0, const struct rte_mbuf *m)
 {
 	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
 	uint64_t eip_len = 0;
@@ -2457,7 +2420,7 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+	const struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
@@ -2496,59 +2459,31 @@ struct iavf_tx_context_desc_qws {
 	__le64 qw1;
 };
 
-static inline void
-iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
-	uint16_t *tlen, uint8_t vlan_flag)
+/* IPsec callback for ci_xmit_pkts - gets IPsec descriptor information */
+static uint16_t
+iavf_get_ipsec_desc(const struct rte_mbuf *mbuf, const struct ci_tx_queue *txq,
+		    void **ipsec_metadata, uint64_t *qw0, uint64_t *qw1)
 {
-	volatile struct iavf_tx_context_desc_qws *desc_qws =
-			(volatile struct iavf_tx_context_desc_qws *)desc;
-	/* fill descriptor type field */
-	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
-
-	/* fill command field */
-	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m, vlan_flag);
-
-	/* fill segmentation field */
-	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
-		/* fill IPsec field */
-		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
-			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
-				ipsec_md);
-
-		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m, ipsec_md);
-	}
-
-	/* fill tunnelling field */
-	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
-	else
-		desc_qws->qw0 = 0;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
 
-	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
-	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+	if (!(mbuf->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD))
+		return 0;
 
-	/* vlan_flag specifies VLAN tag location for VLAN, and outer tag location for QinQ. */
-	if (m->ol_flags & RTE_MBUF_F_TX_QINQ)
-		desc->l2tag2 = vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 ? m->vlan_tci_outer :
-						m->vlan_tci;
-	else if (m->ol_flags & RTE_MBUF_F_TX_VLAN && vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		desc->l2tag2 = m->vlan_tci;
-}
+	md = RTE_MBUF_DYNFIELD(mbuf, txq->ipsec_crypto_pkt_md_offset,
+				     struct iavf_ipsec_crypto_pkt_metadata *);
+	if (!md)
+		return 0;
 
+	*ipsec_metadata = md;
 
-static inline void
-iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
-	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
-{
-	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+	/* Fill IPsec descriptor using existing logic */
+	*qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
 		IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
 		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
 		((uint64_t)md->esp_trailer_len <<
 				IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
 
-	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+	*qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
 		IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
 		((uint64_t)md->next_proto <<
 				IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
@@ -2557,143 +2492,106 @@ iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
 		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
 				1ULL : 0ULL) <<
 				IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
-		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+		((uint64_t)IAVF_TX_DESC_DTYPE_IPSEC <<
+				CI_TXD_QW1_DTYPE_S));
 
-	/**
-	 * TODO: Pre-calculate this in the Session initialization
-	 *
-	 * Calculate IPsec length required in data descriptor func when TSO
-	 * offload is enabled
-	 */
-	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
-			(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
-			sizeof(struct rte_udp_hdr) : 0);
+	return 1; /* One IPsec descriptor needed */
 }
 
-static inline void
-iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
-		struct rte_mbuf *m, uint8_t vlan_flag)
+/* IPsec callback for ci_xmit_pkts - calculates segment length for IPsec+TSO */
+static uint16_t
+iavf_calc_ipsec_segment_len(const struct rte_mbuf *mb_seg, uint64_t ol_flags,
+			    const void *ipsec_metadata, uint16_t tlen)
 {
-	uint64_t command = 0;
-	uint64_t offset = 0;
-	uint64_t l2tag1 = 0;
-
-	*qw1 = CI_TX_DESC_DTYPE_DATA;
-
-	command = (uint64_t)CI_TX_DESC_CMD_ICRC;
-
-	/* Descriptor based VLAN insertion */
-	if ((vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) &&
-			m->ol_flags & RTE_MBUF_F_TX_VLAN) {
-		command |= (uint64_t)CI_TX_DESC_CMD_IL2TAG1;
-		l2tag1 |= m->vlan_tci;
-	}
-
-	/* Descriptor based QinQ insertion. vlan_flag specifies outer tag location. */
-	if (m->ol_flags & RTE_MBUF_F_TX_QINQ) {
-		command |= (uint64_t)CI_TX_DESC_CMD_IL2TAG1;
-		l2tag1 = vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 ? m->vlan_tci_outer :
-									m->vlan_tci;
+	const struct iavf_ipsec_crypto_pkt_metadata *ipsec_md = ipsec_metadata;
+
+	if ((ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
+	    (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+		uint16_t ipseclen = ipsec_md ? (ipsec_md->esp_trailer_len +
+						ipsec_md->len_iv) : 0;
+		uint16_t slen = tlen + mb_seg->l2_len + mb_seg->l3_len +
+				mb_seg->outer_l3_len + ipseclen;
+		if (ol_flags & RTE_MBUF_F_TX_L4_MASK)
+			slen += mb_seg->l4_len;
+		return slen;
 	}
 
-	if ((m->ol_flags &
-	    (CI_TX_CKSUM_OFFLOAD_MASK | RTE_MBUF_F_TX_SEC_OFFLOAD)) == 0)
-		goto skip_cksum;
+	return mb_seg->data_len;
+}
 
-	/* Set MACLEN */
-	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK &&
-			!(m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD))
-		offset |= (m->outer_l2_len >> 1)
-			<< CI_TX_DESC_LEN_MACLEN_S;
-	else
-		offset |= (m->l2_len >> 1)
-			<< CI_TX_DESC_LEN_MACLEN_S;
+/* Context descriptor callback for ci_xmit_pkts */
+static uint16_t
+iavf_get_context_desc(uint64_t ol_flags, const struct rte_mbuf *mbuf,
+		      const union ci_tx_offload *tx_offload __rte_unused,
+		      const struct ci_tx_queue *txq,
+		      uint32_t *td_offset __rte_unused, uint64_t *qw0, uint64_t *qw1)
+{
+	uint8_t iavf_vlan_flag;
+	uint16_t cd_l2tag2 = 0;
+	uint64_t cd_type_cmd = IAVF_TX_DESC_DTYPE_CONTEXT;
+	uint64_t cd_tunneling_params = 0;
+	uint16_t tlen = 0;
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md = NULL;
+
+	/* Use IAVF-specific vlan_flag from txq */
+	iavf_vlan_flag = txq->vlan_flag;
+
+	/* Check if context descriptor is needed using existing IAVF logic */
+	if (!iavf_calc_context_desc(mbuf, iavf_vlan_flag))
+		return 0;
 
-	/* Enable L3 checksum offloading inner */
-	if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
-		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
-			command |= CI_TX_DESC_CMD_IIPT_IPV4_CSUM;
-			offset |= (m->l3_len >> 2) << CI_TX_DESC_LEN_IPLEN_S;
-		}
-	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
-		command |= CI_TX_DESC_CMD_IIPT_IPV4;
-		offset |= (m->l3_len >> 2) << CI_TX_DESC_LEN_IPLEN_S;
-	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
-		command |= CI_TX_DESC_CMD_IIPT_IPV6;
-		offset |= (m->l3_len >> 2) << CI_TX_DESC_LEN_IPLEN_S;
+	/* Get IPsec metadata if needed */
+	if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+		ipsec_md = RTE_MBUF_DYNFIELD(mbuf, txq->ipsec_crypto_pkt_md_offset,
+					     struct iavf_ipsec_crypto_pkt_metadata *);
 	}
 
-	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
-		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-			command |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
-		else
-			command |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
-		offset |= (m->l4_len >> 2) <<
-			      CI_TX_DESC_LEN_L4_LEN_S;
+	/* TSO command field */
+	if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		cd_type_cmd |= (uint64_t)CI_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
 
-		*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
-			IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
-			(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
-			IAVF_TXD_DATA_QW1_OFFSET_MASK) |
-			((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
+		/* IPsec field for TSO */
+		if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD && ipsec_md) {
+			uint64_t ipsec_field = (uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+				IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+			cd_type_cmd |= ipsec_field;
+		}
 
-		return;
+		/* TSO segmentation field */
+		tlen = iavf_fill_ctx_desc_segmentation_field(&cd_type_cmd,
+							     mbuf, ipsec_md);
+		(void)tlen; /* Suppress unused variable warning */
 	}
 
-	/* Enable L4 checksum offloads */
-	switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
-	case RTE_MBUF_F_TX_TCP_CKSUM:
-		command |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
-		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-				CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		command |= CI_TX_DESC_CMD_L4T_EOFT_SCTP;
-		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-				CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	case RTE_MBUF_F_TX_UDP_CKSUM:
-		command |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
-		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-				CI_TX_DESC_LEN_L4_LEN_S;
-		break;
+	/* VLAN field for L2TAG2 */
+	if ((ol_flags & RTE_MBUF_F_TX_VLAN &&
+	     iavf_vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) ||
+	    ol_flags & RTE_MBUF_F_TX_QINQ) {
+		cd_type_cmd |= (uint64_t)CI_TX_CTX_DESC_IL2TAG2 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
 	}
 
-skip_cksum:
-	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
-		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
-		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
-		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
-		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
-}
-
-static inline void
-iavf_fill_data_desc(volatile struct ci_tx_desc *desc,
-	uint64_t desc_template,	uint16_t buffsz,
-	uint64_t buffer_addr)
-{
-	/* fill data descriptor qw1 from template */
-	desc->cmd_type_offset_bsz = desc_template;
-
-	/* set data buffer size */
-	desc->cmd_type_offset_bsz |=
-		(((uint64_t)buffsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
-		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
-
-	desc->buffer_addr = rte_cpu_to_le_64(buffer_addr);
-	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
-}
-
+	/* LLDP switching field */
+	if (IAVF_CHECK_TX_LLDP(mbuf))
+		cd_type_cmd |= IAVF_TX_CTX_DESC_SWTCH_UPLINK << IAVF_TXD_CTX_QW1_CMD_SHIFT;
+
+	/* Tunneling field */
+	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field((uint64_t *)&cd_tunneling_params, mbuf);
+
+	/* L2TAG2 field (VLAN) */
+	if (ol_flags & RTE_MBUF_F_TX_QINQ) {
+		cd_l2tag2 = iavf_vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 ?
+			    mbuf->vlan_tci_outer : mbuf->vlan_tci;
+	} else if (ol_flags & RTE_MBUF_F_TX_VLAN &&
+		   iavf_vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
+		cd_l2tag2 = mbuf->vlan_tci;
+	}
 
-static struct iavf_ipsec_crypto_pkt_metadata *
-iavf_ipsec_crypto_get_pkt_metadata(const struct ci_tx_queue *txq,
-		struct rte_mbuf *m)
-{
-	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
-		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
-				struct iavf_ipsec_crypto_pkt_metadata *);
+	/* Set outputs */
+	*qw0 = rte_cpu_to_le_64(cd_tunneling_params | ((uint64_t)cd_l2tag2 << 32));
+	*qw1 = rte_cpu_to_le_64(cd_type_cmd);
 
-	return NULL;
+	return 1; /* One context descriptor needed */
 }
 
 /* TX function */
@@ -2701,231 +2599,17 @@ uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct ci_tx_queue *txq = tx_queue;
-	volatile struct ci_tx_desc *txr = txq->ci_tx_ring;
-	struct ci_tx_entry *txe_ring = txq->sw_ring;
-	struct ci_tx_entry *txe, *txn;
-	struct rte_mbuf *mb, *mb_seg;
-	uint64_t buf_dma_addr;
-	uint16_t desc_idx, desc_idx_last;
-	uint16_t idx;
-	uint16_t slen;
-
-
-	/* Check if the descriptor ring needs to be cleaned. */
-	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ci_tx_xmit_cleanup(txq);
-
-	desc_idx = txq->tx_tail;
-	txe = &txe_ring[desc_idx];
-
-	for (idx = 0; idx < nb_pkts; idx++) {
-		volatile struct ci_tx_desc *ddesc;
-		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
-
-		uint16_t nb_desc_ctx, nb_desc_ipsec;
-		uint16_t nb_desc_data, nb_desc_required;
-		uint16_t tlen = 0, ipseclen = 0;
-		uint64_t ddesc_template = 0;
-		uint64_t ddesc_cmd = 0;
-
-		mb = tx_pkts[idx];
 
-		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
-
-		/**
-		 * Get metadata for ipsec crypto from mbuf dynamic fields if
-		 * security offload is specified.
-		 */
-		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
-
-		nb_desc_data = mb->nb_segs;
-		nb_desc_ctx =
-			iavf_calc_context_desc(mb, txq->vlan_flag);
-		nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
-
-		/**
-		 * The number of descriptors that must be allocated for
-		 * a packet equals to the number of the segments of that
-		 * packet plus the context and ipsec descriptors if needed.
-		 * Recalculate the needed tx descs when TSO enabled in case
-		 * the mbuf data size exceeds max data size that hw allows
-		 * per tx desc.
-		 */
-		if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-			nb_desc_required = ci_calc_pkt_desc(mb) + nb_desc_ctx + nb_desc_ipsec;
-		else
-			nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
-
-		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
-
-		/* wrap descriptor ring */
-		if (desc_idx_last >= txq->nb_tx_desc)
-			desc_idx_last =
-				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
-
-		PMD_TX_LOG(DEBUG,
-			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
-			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
-
-		if (nb_desc_required > txq->nb_tx_free) {
-			if (ci_tx_xmit_cleanup(txq)) {
-				if (idx == 0)
-					return 0;
-				goto end_of_tx;
-			}
-			if (unlikely(nb_desc_required > txq->tx_rs_thresh)) {
-				while (nb_desc_required > txq->nb_tx_free) {
-					if (ci_tx_xmit_cleanup(txq)) {
-						if (idx == 0)
-							return 0;
-						goto end_of_tx;
-					}
-				}
-			}
-		}
-
-		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb,
-			txq->vlan_flag);
-
-			/* Setup TX context descriptor if required */
-		if (nb_desc_ctx) {
-			volatile struct iavf_tx_context_desc *ctx_desc =
-				(volatile struct iavf_tx_context_desc *)
-					&txr[desc_idx];
-
-			/* clear QW0 or the previous writeback value
-			 * may impact next write
-			 */
-			*(volatile uint64_t *)ctx_desc = 0;
-
-			txn = &txe_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-
-			if (txe->mbuf) {
-				rte_pktmbuf_free_seg(txe->mbuf);
-				txe->mbuf = NULL;
-			}
-
-			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen,
-				txq->vlan_flag);
-			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
-
-			txe->last_id = desc_idx_last;
-			desc_idx = txe->next_id;
-			txe = txn;
-		}
-
-		if (nb_desc_ipsec) {
-			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
-				(volatile struct iavf_tx_ipsec_desc *)
-					&txr[desc_idx];
-
-			txn = &txe_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-
-			if (txe->mbuf) {
-				rte_pktmbuf_free_seg(txe->mbuf);
-				txe->mbuf = NULL;
-			}
-
-			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
-
-			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
-
-			txe->last_id = desc_idx_last;
-			desc_idx = txe->next_id;
-			txe = txn;
-		}
-
-		mb_seg = mb;
-
-		do {
-			ddesc = (volatile struct ci_tx_desc *)
-					&txr[desc_idx];
-
-			txn = &txe_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-
-			if (txe->mbuf)
-				rte_pktmbuf_free_seg(txe->mbuf);
-
-			txe->mbuf = mb_seg;
-
-			if ((mb_seg->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
-					(mb_seg->ol_flags &
-						(RTE_MBUF_F_TX_TCP_SEG |
-						RTE_MBUF_F_TX_UDP_SEG))) {
-				slen = tlen + mb_seg->l2_len + mb_seg->l3_len +
-						mb_seg->outer_l3_len + ipseclen;
-				if (mb_seg->ol_flags & RTE_MBUF_F_TX_L4_MASK)
-					slen += mb_seg->l4_len;
-			} else {
-				slen = mb_seg->data_len;
-			}
-
-			buf_dma_addr = rte_mbuf_data_iova(mb_seg);
-			while ((mb_seg->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
-					RTE_MBUF_F_TX_UDP_SEG)) &&
-					unlikely(slen > CI_MAX_DATA_PER_TXD)) {
-				iavf_fill_data_desc(ddesc, ddesc_template,
-					CI_MAX_DATA_PER_TXD, buf_dma_addr);
-
-				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
-
-				buf_dma_addr += CI_MAX_DATA_PER_TXD;
-				slen -= CI_MAX_DATA_PER_TXD;
-
-				txe->last_id = desc_idx_last;
-				desc_idx = txe->next_id;
-				txe = txn;
-				ddesc = &txr[desc_idx];
-				txn = &txe_ring[txe->next_id];
-			}
-
-			iavf_fill_data_desc(ddesc, ddesc_template,
-					slen, buf_dma_addr);
-
-			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
-
-			txe->last_id = desc_idx_last;
-			desc_idx = txe->next_id;
-			txe = txn;
-			mb_seg = mb_seg->next;
-		} while (mb_seg);
-
-		/* The last packet data descriptor needs End Of Packet (EOP) */
-		ddesc_cmd = CI_TX_DESC_CMD_EOP;
-
-		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_desc_required);
-		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_desc_required);
-
-		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
-			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
-				   "%4u (port=%d queue=%d)",
-				   desc_idx_last, txq->port_id, txq->queue_id);
-
-			ddesc_cmd |= CI_TX_DESC_CMD_RS;
-
-			/* Update txq RS bit counters */
-			txq->nb_tx_used = 0;
-		}
-
-		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
-				IAVF_TXD_DATA_QW1_CMD_SHIFT);
-
-		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
-	}
-
-end_of_tx:
-	rte_wmb();
-
-	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		   txq->port_id, txq->queue_id, desc_idx, idx);
-
-	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
-	txq->tx_tail = desc_idx;
+	const struct ci_ipsec_ops ipsec_ops = {
+		.get_ipsec_desc = iavf_get_ipsec_desc,
+		.calc_segment_len = iavf_calc_ipsec_segment_len,
+	};
 
-	return idx;
+	/* IAVF does not support timestamp queues, so pass NULL for ts_fns */
+	return ci_xmit_pkts(txq, tx_pkts, nb_pkts,
+			    (txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) ?
+				CI_VLAN_IN_L2TAG1 : CI_VLAN_IN_L2TAG2,
+			    iavf_get_context_desc, &ipsec_ops, NULL);
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
-- 
2.51.0



Thread overview: 30+ messages
2025-12-19 17:25 [RFC PATCH 00/27] combine multiple Intel scalar Tx paths Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 01/27] net/intel: create common Tx descriptor structure Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 02/27] net/intel: use common tx ring structure Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 03/27] net/intel: create common post-Tx cleanup function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 04/27] net/intel: consolidate definitions for Tx desc fields Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 05/27] net/intel: create separate header for Tx scalar fns Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 06/27] net/intel: add common fn to calculate needed descriptors Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 07/27] net/ice: refactor context descriptor handling Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 08/27] net/i40e: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 09/27] net/idpf: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 10/27] net/intel: consolidate checksum mask definition Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 11/27] net/intel: create common checksum Tx offload function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 12/27] net/intel: create a common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 13/27] net/i40e: use " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 14/27] net/intel: add IPSec hooks to common " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 15/27] net/intel: support configurable VLAN tag insertion on Tx Bruce Richardson
2025-12-19 17:25 ` Bruce Richardson [this message]
2025-12-19 17:25 ` [RFC PATCH 17/27] net/i40e: document requirement for QinQ support Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 18/27] net/idpf: use common scalar Tx function Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 19/27] net/intel: avoid writing the final pkt descriptor twice Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 20/27] net/intel: write descriptors using non-volatile pointers Bruce Richardson
2025-12-20  8:43   ` Morten Brørup
2025-12-19 17:25 ` [RFC PATCH 21/27] net/intel: remove unnecessary flag clearing Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 22/27] net/intel: mark mid-burst ring cleanup as unlikely Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 23/27] net/intel: add special handling for single desc packets Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 24/27] net/intel: use separate array for desc status tracking Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 25/27] net/ixgbe: " Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 26/27] net/intel: drop unused Tx queue used count Bruce Richardson
2025-12-19 17:25 ` [RFC PATCH 27/27] net/intel: remove index for tracking end of packet Bruce Richardson
2025-12-20  9:05   ` Morten Brørup
