From: Bruce Richardson <bruce.richardson@intel.com>
To: dev@dpdk.org
Cc: Bruce Richardson, Vladimir Medvedkin
Subject: [RFC PATCH 16/27] net/iavf: use common scalar Tx function
Date: Fri, 19 Dec 2025 17:25:33 +0000
Message-ID: <20251219172548.2660777-17-bruce.richardson@intel.com>
In-Reply-To: <20251219172548.2660777-1-bruce.richardson@intel.com>
References: <20251219172548.2660777-1-bruce.richardson@intel.com>

Now that the common scalar Tx function has all the hooks necessary for
the features supported by the iavf driver, use the common function to
avoid duplicating code.
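The driver-specific behaviour is supplied to ci_xmit_pkts() as a set of
hooks: a context-descriptor callback, an optional ci_ipsec_ops struct
and an optional set of timestamp functions (unused by iavf). For
illustration, a driver needing none of these could reduce its Tx
function to roughly the following sketch ("foo" is a placeholder driver
name, and the sketch assumes the common code accepts NULL for the
unused hooks, as it does for ts_fns in this patch):

	uint16_t
	foo_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
	{
		struct ci_tx_queue *txq = tx_queue;

		/* no context descriptors, no IPsec, no Tx timestamping */
		return ci_xmit_pkts(txq, tx_pkts, nb_pkts,
				CI_VLAN_IN_L2TAG1, NULL, NULL, NULL);
	}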
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
---
 drivers/net/intel/iavf/iavf_rxtx.c | 534 ++++++-----------------------
 1 file changed, 109 insertions(+), 425 deletions(-)

diff --git a/drivers/net/intel/iavf/iavf_rxtx.c b/drivers/net/intel/iavf/iavf_rxtx.c
index 9ce978e69c..f96876ca46 100644
--- a/drivers/net/intel/iavf/iavf_rxtx.c
+++ b/drivers/net/intel/iavf/iavf_rxtx.c
@@ -2327,7 +2327,7 @@ iavf_recv_pkts_bulk_alloc(void *rx_queue,
 
 /* Check if the context descriptor is needed for TX offloading */
 static inline uint16_t
-iavf_calc_context_desc(struct rte_mbuf *mb, uint8_t vlan_flag)
+iavf_calc_context_desc(const struct rte_mbuf *mb, uint8_t vlan_flag)
 {
 	uint64_t flags = mb->ol_flags;
 	if (flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
@@ -2345,44 +2345,7 @@ iavf_calc_context_desc(struct rte_mbuf *mb, uint8_t vlan_flag)
 }
 
 static inline void
-iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m,
-		uint8_t vlan_flag)
-{
-	uint64_t cmd = 0;
-
-	/* TSO enabled */
-	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
-		cmd = CI_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
-
-	if ((m->ol_flags & RTE_MBUF_F_TX_VLAN &&
-			vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) ||
-			m->ol_flags & RTE_MBUF_F_TX_QINQ) {
-		cmd |= CI_TX_CTX_DESC_IL2TAG2
-			<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-	}
-
-	if (IAVF_CHECK_TX_LLDP(m))
-		cmd |= IAVF_TX_CTX_DESC_SWTCH_UPLINK
-			<< IAVF_TXD_CTX_QW1_CMD_SHIFT;
-
-	*field |= cmd;
-}
-
-static inline void
-iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
-	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
-{
-	uint64_t ipsec_field =
-		(uint64_t)ipsec_md->ctx_desc_ipsec_params <<
-		IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
-
-	*field |= ipsec_field;
-}
-
-
-static inline void
-iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
-	const struct rte_mbuf *m)
+iavf_fill_ctx_desc_tunnelling_field(uint64_t *qw0, const struct rte_mbuf *m)
 {
 	uint64_t eip_typ = IAVF_TX_CTX_DESC_EIPT_NONE;
 	uint64_t eip_len = 0;
@@ -2457,7 +2420,7 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
 
 static inline uint16_t
 iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
-	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
+	const struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
 {
 	uint64_t segmentation_field = 0;
 	uint64_t total_length = 0;
@@ -2496,59 +2459,31 @@ struct iavf_tx_context_desc_qws {
 	__le64 qw1;
 };
 
-static inline void
-iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
-	struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
-	uint16_t *tlen, uint8_t vlan_flag)
+/* IPsec callback for ci_xmit_pkts - gets IPsec descriptor information */
+static uint16_t
+iavf_get_ipsec_desc(const struct rte_mbuf *mbuf, const struct ci_tx_queue *txq,
+	void **ipsec_metadata, uint64_t *qw0, uint64_t *qw1)
 {
-	volatile struct iavf_tx_context_desc_qws *desc_qws =
-		(volatile struct iavf_tx_context_desc_qws *)desc;
-	/* fill descriptor type field */
-	desc_qws->qw1 = IAVF_TX_DESC_DTYPE_CONTEXT;
-
-	/* fill command field */
-	iavf_fill_ctx_desc_cmd_field(&desc_qws->qw1, m, vlan_flag);
-
-	/* fill segmentation field */
-	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
-		/* fill IPsec field */
-		if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
-			iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
-				ipsec_md);
-
-		*tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
-				m, ipsec_md);
-	}
-
-	/* fill tunnelling field */
-	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-		iavf_fill_ctx_desc_tunnelling_field(&desc_qws->qw0, m);
-	else
-		desc_qws->qw0 = 0;
+	struct iavf_ipsec_crypto_pkt_metadata *md;
 
-	desc_qws->qw0 = rte_cpu_to_le_64(desc_qws->qw0);
-	desc_qws->qw1 = rte_cpu_to_le_64(desc_qws->qw1);
+	if (!(mbuf->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD))
+		return 0;
 
-	/* vlan_flag specifies VLAN tag location for VLAN, and outer tag location for QinQ. */
-	if (m->ol_flags & RTE_MBUF_F_TX_QINQ)
-		desc->l2tag2 = vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 ? m->vlan_tci_outer :
-			m->vlan_tci;
-	else if (m->ol_flags & RTE_MBUF_F_TX_VLAN && vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2)
-		desc->l2tag2 = m->vlan_tci;
-}
+	md = RTE_MBUF_DYNFIELD(mbuf, txq->ipsec_crypto_pkt_md_offset,
+		struct iavf_ipsec_crypto_pkt_metadata *);
+	if (!md)
+		return 0;
+	*ipsec_metadata = md;
 
-static inline void
-iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
-	const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
-{
-	desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
+	/* Fill IPsec descriptor using existing logic */
+	*qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
 			IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
 		((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
 		((uint64_t)md->esp_trailer_len <<
 			IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
 
-	desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
+	*qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
 			IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
 		((uint64_t)md->next_proto <<
 			IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
@@ -2557,143 +2492,106 @@ iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
 		((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
 				1ULL : 0ULL) <<
 			IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
-		(uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
+		((uint64_t)IAVF_TX_DESC_DTYPE_IPSEC <<
+			CI_TXD_QW1_DTYPE_S));
 
-	/**
-	 * TODO: Pre-calculate this in the Session initialization
-	 *
-	 * Calculate IPsec length required in data descriptor func when TSO
-	 * offload is enabled
-	 */
-	*ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
-		(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
-			sizeof(struct rte_udp_hdr) : 0);
+	return 1; /* One IPsec descriptor needed */
 }
 
-static inline void
-iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
-		struct rte_mbuf *m, uint8_t vlan_flag)
+/* IPsec callback for ci_xmit_pkts - calculates segment length for IPsec+TSO */
+static uint16_t
+iavf_calc_ipsec_segment_len(const struct rte_mbuf *mb_seg, uint64_t ol_flags,
+	const void *ipsec_metadata, uint16_t tlen)
 {
-	uint64_t command = 0;
-	uint64_t offset = 0;
-	uint64_t l2tag1 = 0;
-
-	*qw1 = CI_TX_DESC_DTYPE_DATA;
-
-	command = (uint64_t)CI_TX_DESC_CMD_ICRC;
-
-	/* Descriptor based VLAN insertion */
-	if ((vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) &&
-			m->ol_flags & RTE_MBUF_F_TX_VLAN) {
-		command |= (uint64_t)CI_TX_DESC_CMD_IL2TAG1;
-		l2tag1 |= m->vlan_tci;
-	}
-
-	/* Descriptor based QinQ insertion. vlan_flag specifies outer tag location. */
-	if (m->ol_flags & RTE_MBUF_F_TX_QINQ) {
-		command |= (uint64_t)CI_TX_DESC_CMD_IL2TAG1;
-		l2tag1 = vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 ? m->vlan_tci_outer :
-			m->vlan_tci;
+	const struct iavf_ipsec_crypto_pkt_metadata *ipsec_md = ipsec_metadata;
+
+	if ((ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
+			(ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))) {
+		uint16_t ipseclen = ipsec_md ? (ipsec_md->esp_trailer_len +
+				ipsec_md->len_iv) : 0;
+		uint16_t slen = tlen + mb_seg->l2_len + mb_seg->l3_len +
+				mb_seg->outer_l3_len + ipseclen;
+		if (ol_flags & RTE_MBUF_F_TX_L4_MASK)
+			slen += mb_seg->l4_len;
+		return slen;
 	}
 
-	if ((m->ol_flags &
-			(CI_TX_CKSUM_OFFLOAD_MASK | RTE_MBUF_F_TX_SEC_OFFLOAD)) == 0)
-		goto skip_cksum;
+	return mb_seg->data_len;
+}
 
-	/* Set MACLEN */
-	if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK &&
-			!(m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD))
-		offset |= (m->outer_l2_len >> 1)
-			<< CI_TX_DESC_LEN_MACLEN_S;
-	else
-		offset |= (m->l2_len >> 1)
-			<< CI_TX_DESC_LEN_MACLEN_S;
+/* Context descriptor callback for ci_xmit_pkts */
+static uint16_t
+iavf_get_context_desc(uint64_t ol_flags, const struct rte_mbuf *mbuf,
+		const union ci_tx_offload *tx_offload __rte_unused,
+		const struct ci_tx_queue *txq,
+		uint32_t *td_offset __rte_unused, uint64_t *qw0, uint64_t *qw1)
+{
+	uint8_t iavf_vlan_flag;
+	uint16_t cd_l2tag2 = 0;
+	uint64_t cd_type_cmd = IAVF_TX_DESC_DTYPE_CONTEXT;
+	uint64_t cd_tunneling_params = 0;
+	uint16_t tlen = 0;
+	struct iavf_ipsec_crypto_pkt_metadata *ipsec_md = NULL;
+
+	/* Use IAVF-specific vlan_flag from txq */
+	iavf_vlan_flag = txq->vlan_flag;
+
+	/* Check if context descriptor is needed using existing IAVF logic */
+	if (!iavf_calc_context_desc(mbuf, iavf_vlan_flag))
+		return 0;
 
-	/* Enable L3 checksum offloading inner */
-	if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
-		if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
-			command |= CI_TX_DESC_CMD_IIPT_IPV4_CSUM;
-			offset |= (m->l3_len >> 2) << CI_TX_DESC_LEN_IPLEN_S;
-		}
-	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV4) {
-		command |= CI_TX_DESC_CMD_IIPT_IPV4;
-		offset |= (m->l3_len >> 2) << CI_TX_DESC_LEN_IPLEN_S;
-	} else if (m->ol_flags & RTE_MBUF_F_TX_IPV6) {
-		command |= CI_TX_DESC_CMD_IIPT_IPV6;
-		offset |= (m->l3_len >> 2) << CI_TX_DESC_LEN_IPLEN_S;
+	/* Get IPsec metadata if needed */
+	if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+		ipsec_md = RTE_MBUF_DYNFIELD(mbuf, txq->ipsec_crypto_pkt_md_offset,
+			struct iavf_ipsec_crypto_pkt_metadata *);
 	}
 
-	if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
-		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-			command |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
-		else
-			command |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
-		offset |= (m->l4_len >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
+	/* TSO command field */
+	if (ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
+		cd_type_cmd |= (uint64_t)CI_TX_CTX_DESC_TSO << IAVF_TXD_CTX_QW1_CMD_SHIFT;
 
-		*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
-			IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
-			(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
-			IAVF_TXD_DATA_QW1_OFFSET_MASK) |
-			((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
+		/* IPsec field for TSO */
+		if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD && ipsec_md) {
+			uint64_t ipsec_field = (uint64_t)ipsec_md->ctx_desc_ipsec_params <<
+				IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
+			cd_type_cmd |= ipsec_field;
+		}
 
-		return;
+		/* TSO segmentation field */
+		tlen = iavf_fill_ctx_desc_segmentation_field(&cd_type_cmd,
+				mbuf, ipsec_md);
+		(void)tlen; /* Suppress unused variable warning */
 	}
 
-	/* Enable L4 checksum offloads */
-	switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
-	case RTE_MBUF_F_TX_TCP_CKSUM:
-		command |= CI_TX_DESC_CMD_L4T_EOFT_TCP;
-		offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	case RTE_MBUF_F_TX_SCTP_CKSUM:
-		command |= CI_TX_DESC_CMD_L4T_EOFT_SCTP;
-		offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
-	case RTE_MBUF_F_TX_UDP_CKSUM:
-		command |= CI_TX_DESC_CMD_L4T_EOFT_UDP;
-		offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
-			CI_TX_DESC_LEN_L4_LEN_S;
-		break;
+	/* VLAN field for L2TAG2 */
+	if ((ol_flags & RTE_MBUF_F_TX_VLAN &&
+			iavf_vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) ||
+			ol_flags & RTE_MBUF_F_TX_QINQ) {
+		cd_type_cmd |= (uint64_t)CI_TX_CTX_DESC_IL2TAG2 << IAVF_TXD_CTX_QW1_CMD_SHIFT;
 	}
 
-skip_cksum:
-	*qw1 = rte_cpu_to_le_64((((uint64_t)command <<
-		IAVF_TXD_DATA_QW1_CMD_SHIFT) & IAVF_TXD_DATA_QW1_CMD_MASK) |
-		(((uint64_t)offset << IAVF_TXD_DATA_QW1_OFFSET_SHIFT) &
-		IAVF_TXD_DATA_QW1_OFFSET_MASK) |
-		((uint64_t)l2tag1 << IAVF_TXD_DATA_QW1_L2TAG1_SHIFT));
-}
-
-static inline void
-iavf_fill_data_desc(volatile struct ci_tx_desc *desc,
-	uint64_t desc_template, uint16_t buffsz,
-	uint64_t buffer_addr)
-{
-	/* fill data descriptor qw1 from template */
-	desc->cmd_type_offset_bsz = desc_template;
-
-	/* set data buffer size */
-	desc->cmd_type_offset_bsz |=
-		(((uint64_t)buffsz << IAVF_TXD_DATA_QW1_TX_BUF_SZ_SHIFT) &
-		IAVF_TXD_DATA_QW1_TX_BUF_SZ_MASK);
-
-	desc->buffer_addr = rte_cpu_to_le_64(buffer_addr);
-	desc->cmd_type_offset_bsz = rte_cpu_to_le_64(desc->cmd_type_offset_bsz);
-}
-
+	/* LLDP switching field */
+	if (IAVF_CHECK_TX_LLDP(mbuf))
+		cd_type_cmd |= IAVF_TX_CTX_DESC_SWTCH_UPLINK << IAVF_TXD_CTX_QW1_CMD_SHIFT;
+
+	/* Tunneling field */
+	if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+		iavf_fill_ctx_desc_tunnelling_field((uint64_t *)&cd_tunneling_params, mbuf);
+
+	/* L2TAG2 field (VLAN) */
+	if (ol_flags & RTE_MBUF_F_TX_QINQ) {
+		cd_l2tag2 = iavf_vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 ?
+			mbuf->vlan_tci_outer : mbuf->vlan_tci;
+	} else if (ol_flags & RTE_MBUF_F_TX_VLAN &&
+			iavf_vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2) {
+		cd_l2tag2 = mbuf->vlan_tci;
+	}
 
-static struct iavf_ipsec_crypto_pkt_metadata *
-iavf_ipsec_crypto_get_pkt_metadata(const struct ci_tx_queue *txq,
-		struct rte_mbuf *m)
-{
-	if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
-		return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
-				struct iavf_ipsec_crypto_pkt_metadata *);
+	/* Set outputs */
+	*qw0 = rte_cpu_to_le_64(cd_tunneling_params | ((uint64_t)cd_l2tag2 << 32));
+	*qw1 = rte_cpu_to_le_64(cd_type_cmd);
 
-	return NULL;
+	return 1; /* One context descriptor needed */
 }
 
 /* TX function */
@@ -2701,231 +2599,17 @@ uint16_t
 iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct ci_tx_queue *txq = tx_queue;
-	volatile struct ci_tx_desc *txr = txq->ci_tx_ring;
-	struct ci_tx_entry *txe_ring = txq->sw_ring;
-	struct ci_tx_entry *txe, *txn;
-	struct rte_mbuf *mb, *mb_seg;
-	uint64_t buf_dma_addr;
-	uint16_t desc_idx, desc_idx_last;
-	uint16_t idx;
-	uint16_t slen;
-
-
-	/* Check if the descriptor ring needs to be cleaned. */
-	if (txq->nb_tx_free < txq->tx_free_thresh)
-		ci_tx_xmit_cleanup(txq);
-
-	desc_idx = txq->tx_tail;
-	txe = &txe_ring[desc_idx];
-
-	for (idx = 0; idx < nb_pkts; idx++) {
-		volatile struct ci_tx_desc *ddesc;
-		struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
-
-		uint16_t nb_desc_ctx, nb_desc_ipsec;
-		uint16_t nb_desc_data, nb_desc_required;
-		uint16_t tlen = 0, ipseclen = 0;
-		uint64_t ddesc_template = 0;
-		uint64_t ddesc_cmd = 0;
-
-		mb = tx_pkts[idx];
-		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
-
-		/**
-		 * Get metadata for ipsec crypto from mbuf dynamic fields if
-		 * security offload is specified.
-		 */
-		ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
-
-		nb_desc_data = mb->nb_segs;
-		nb_desc_ctx =
-			iavf_calc_context_desc(mb, txq->vlan_flag);
-		nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
-
-		/**
-		 * The number of descriptors that must be allocated for
-		 * a packet equals to the number of the segments of that
-		 * packet plus the context and ipsec descriptors if needed.
-		 * Recalculate the needed tx descs when TSO enabled in case
-		 * the mbuf data size exceeds max data size that hw allows
-		 * per tx desc.
-		 */
-		if (mb->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-			nb_desc_required = ci_calc_pkt_desc(mb) + nb_desc_ctx + nb_desc_ipsec;
-		else
-			nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
-
-		desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
-
-		/* wrap descriptor ring */
-		if (desc_idx_last >= txq->nb_tx_desc)
-			desc_idx_last =
-				(uint16_t)(desc_idx_last - txq->nb_tx_desc);
-
-		PMD_TX_LOG(DEBUG,
-			"port_id=%u queue_id=%u tx_first=%u tx_last=%u",
-			txq->port_id, txq->queue_id, desc_idx, desc_idx_last);
-
-		if (nb_desc_required > txq->nb_tx_free) {
-			if (ci_tx_xmit_cleanup(txq)) {
-				if (idx == 0)
-					return 0;
-				goto end_of_tx;
-			}
-			if (unlikely(nb_desc_required > txq->tx_rs_thresh)) {
-				while (nb_desc_required > txq->nb_tx_free) {
-					if (ci_tx_xmit_cleanup(txq)) {
-						if (idx == 0)
-							return 0;
-						goto end_of_tx;
-					}
-				}
-			}
-		}
-
-		iavf_build_data_desc_cmd_offset_fields(&ddesc_template, mb,
-			txq->vlan_flag);
-
-		/* Setup TX context descriptor if required */
-		if (nb_desc_ctx) {
-			volatile struct iavf_tx_context_desc *ctx_desc =
-				(volatile struct iavf_tx_context_desc *)
-					&txr[desc_idx];
-
-			/* clear QW0 or the previous writeback value
-			 * may impact next write
-			 */
-			*(volatile uint64_t *)ctx_desc = 0;
-
-			txn = &txe_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-
-			if (txe->mbuf) {
-				rte_pktmbuf_free_seg(txe->mbuf);
-				txe->mbuf = NULL;
-			}
-
-			iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen,
-				txq->vlan_flag);
-			IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
-
-			txe->last_id = desc_idx_last;
-			desc_idx = txe->next_id;
-			txe = txn;
-		}
-
-		if (nb_desc_ipsec) {
-			volatile struct iavf_tx_ipsec_desc *ipsec_desc =
-				(volatile struct iavf_tx_ipsec_desc *)
-					&txr[desc_idx];
-
-			txn = &txe_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-
-			if (txe->mbuf) {
-				rte_pktmbuf_free_seg(txe->mbuf);
-				txe->mbuf = NULL;
-			}
-
-			iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
-
-			IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
-
-			txe->last_id = desc_idx_last;
-			desc_idx = txe->next_id;
-			txe = txn;
-		}
-
-		mb_seg = mb;
-
-		do {
-			ddesc = (volatile struct ci_tx_desc *)
-					&txr[desc_idx];
-
-			txn = &txe_ring[txe->next_id];
-			RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
-
-			if (txe->mbuf)
-				rte_pktmbuf_free_seg(txe->mbuf);
-
-			txe->mbuf = mb_seg;
-
-			if ((mb_seg->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
-					(mb_seg->ol_flags &
-						(RTE_MBUF_F_TX_TCP_SEG |
-						RTE_MBUF_F_TX_UDP_SEG))) {
-				slen = tlen + mb_seg->l2_len + mb_seg->l3_len +
-						mb_seg->outer_l3_len + ipseclen;
-				if (mb_seg->ol_flags & RTE_MBUF_F_TX_L4_MASK)
-					slen += mb_seg->l4_len;
-			} else {
-				slen = mb_seg->data_len;
-			}
-
-			buf_dma_addr = rte_mbuf_data_iova(mb_seg);
-			while ((mb_seg->ol_flags & (RTE_MBUF_F_TX_TCP_SEG |
-					RTE_MBUF_F_TX_UDP_SEG)) &&
-					unlikely(slen > CI_MAX_DATA_PER_TXD)) {
-				iavf_fill_data_desc(ddesc, ddesc_template,
-					CI_MAX_DATA_PER_TXD, buf_dma_addr);
-
-				IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
-
-				buf_dma_addr += CI_MAX_DATA_PER_TXD;
-				slen -= CI_MAX_DATA_PER_TXD;
-
-				txe->last_id = desc_idx_last;
-				desc_idx = txe->next_id;
-				txe = txn;
-				ddesc = &txr[desc_idx];
-				txn = &txe_ring[txe->next_id];
-			}
-
-			iavf_fill_data_desc(ddesc, ddesc_template,
-				slen, buf_dma_addr);
-
-			IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx);
-
-			txe->last_id = desc_idx_last;
-			desc_idx = txe->next_id;
-			txe = txn;
-			mb_seg = mb_seg->next;
-		} while (mb_seg);
-
-		/* The last packet data descriptor needs End Of Packet (EOP) */
-		ddesc_cmd = CI_TX_DESC_CMD_EOP;
-
-		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_desc_required);
-		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_desc_required);
-
-		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
-			PMD_TX_LOG(DEBUG, "Setting RS bit on TXD id="
-				"%4u (port=%d queue=%d)",
-				desc_idx_last, txq->port_id, txq->queue_id);
-
-			ddesc_cmd |= CI_TX_DESC_CMD_RS;
-
-			/* Update txq RS bit counters */
-			txq->nb_tx_used = 0;
-		}
-
-		ddesc->cmd_type_offset_bsz |= rte_cpu_to_le_64(ddesc_cmd <<
-			IAVF_TXD_DATA_QW1_CMD_SHIFT);
-
-		IAVF_DUMP_TX_DESC(txq, ddesc, desc_idx - 1);
-	}
-
-end_of_tx:
-	rte_wmb();
-
-	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
-		txq->port_id, txq->queue_id, desc_idx, idx);
-
-	IAVF_PCI_REG_WRITE_RELAXED(txq->qtx_tail, desc_idx);
-	txq->tx_tail = desc_idx;
+	const struct ci_ipsec_ops ipsec_ops = {
+		.get_ipsec_desc = iavf_get_ipsec_desc,
+		.calc_segment_len = iavf_calc_ipsec_segment_len,
+	};
 
-	return idx;
+	/* IAVF does not support timestamp queues, so pass NULL for ts_fns */
+	return ci_xmit_pkts(txq, tx_pkts, nb_pkts,
+			(txq->vlan_flag & IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1) ?
+				CI_VLAN_IN_L2TAG1 : CI_VLAN_IN_L2TAG2,
+			iavf_get_context_desc, &ipsec_ops, NULL);
 }
 
 /* Check if the packet with vlan user priority is transmitted in the
-- 
2.51.0
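For reference, the context-descriptor hook used above returns the
number of context descriptors it consumed (0 or 1) and, when it returns
1, fills both halves of the descriptor itself: the tunnelling
parameters occupy the low 32 bits of qw0, the L2TAG2 value sits in bits
32-47 of qw0, and the DTYPE/command/TSO fields go in qw1. A
hypothetical driver needing only descriptor-based QinQ insertion could
therefore implement the hook as little more than the sketch below
("foo" is again a placeholder, and the iavf constants are reused purely
for illustration):

	static uint16_t
	foo_get_context_desc(uint64_t ol_flags, const struct rte_mbuf *mbuf,
			const union ci_tx_offload *tx_offload __rte_unused,
			const struct ci_tx_queue *txq __rte_unused,
			uint32_t *td_offset __rte_unused,
			uint64_t *qw0, uint64_t *qw1)
	{
		if (!(ol_flags & RTE_MBUF_F_TX_QINQ))
			return 0; /* no context descriptor needed */

		/* outer VLAN tag in L2TAG2, i.e. bits 32-47 of qw0 */
		*qw0 = rte_cpu_to_le_64((uint64_t)mbuf->vlan_tci_outer << 32);
		/* descriptor type plus "insert L2TAG2" command in qw1 */
		*qw1 = rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_CONTEXT |
				((uint64_t)CI_TX_CTX_DESC_IL2TAG2 <<
					IAVF_TXD_CTX_QW1_CMD_SHIFT));
		return 1; /* one context descriptor consumed */
	}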