From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mails.dpdk.org (mails.dpdk.org [217.70.189.124]) by inbox.dpdk.org (Postfix) with ESMTP id BB8B7A0C4E; Thu, 12 Aug 2021 16:11:27 +0200 (CEST) Received: from [217.70.189.124] (localhost [127.0.0.1]) by mails.dpdk.org (Postfix) with ESMTP id 5A82141231; Thu, 12 Aug 2021 16:10:42 +0200 (CEST) Received: from mga05.intel.com (mga05.intel.com [192.55.52.43]) by mails.dpdk.org (Postfix) with ESMTP id 7E0C841263 for ; Thu, 12 Aug 2021 16:10:40 +0200 (CEST) X-IronPort-AV: E=McAfee;i="6200,9189,10074"; a="300944480" X-IronPort-AV: E=Sophos;i="5.84,316,1620716400"; d="scan'208";a="300944480" Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga105.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 12 Aug 2021 07:10:40 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.84,316,1620716400"; d="scan'208";a="446554506" Received: from silpixa00400884.ir.intel.com ([10.243.22.82]) by fmsmga007.fm.intel.com with ESMTP; 12 Aug 2021 07:10:36 -0700 From: Radu Nicolau To: Cc: dev@dpdk.org, mdr@ashroe.eu, konstantin.ananyev@intel.com, vladimir.medvedkin@intel.com, bruce.richardson@intel.com, hemant.agrawal@nxp.com, gakhil@marvell.com, anoobj@marvell.com, declan.doherty@intel.com, abhijit.sinha@intel.com, daniel.m.buckley@intel.com, marchana@marvell.com, ktejasree@marvell.com, matan@nvidia.com, Radu Nicolau , Abhijit Sinha Date: Thu, 12 Aug 2021 14:54:25 +0100 Message-Id: <20210812135425.698189-11-radu.nicolau@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: <20210812135425.698189-1-radu.nicolau@intel.com> References: <20210713133542.3550525-1-radu.nicolau@intel.com> <20210812135425.698189-1-radu.nicolau@intel.com> MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Subject: [dpdk-dev] [PATCH v2 10/10] ipsec: add ol_flags support X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: 
, Errors-To: dev-bounces@dpdk.org Sender: "dev" Set mbuff->ol_flags for IPsec packets. Signed-off-by: Declan Doherty Signed-off-by: Radu Nicolau Signed-off-by: Abhijit Sinha Signed-off-by: Daniel Martin Buckley --- lib/ipsec/esp_inb.c | 17 ++++++++++++-- lib/ipsec/esp_outb.c | 48 ++++++++++++++++++++++++++++++--------- lib/ipsec/rte_ipsec_sa.h | 3 ++- lib/ipsec/sa.c | 49 ++++++++++++++++++++++++++++++++++++++-- lib/ipsec/sa.h | 8 +++++++ 5 files changed, 109 insertions(+), 16 deletions(-) diff --git a/lib/ipsec/esp_inb.c b/lib/ipsec/esp_inb.c index 8cb4c16302..5fcb41297e 100644 --- a/lib/ipsec/esp_inb.c +++ b/lib/ipsec/esp_inb.c @@ -559,7 +559,8 @@ trs_process_step3(struct rte_mbuf *mb) * - tx_offload */ static inline void -tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val) +tun_process_step3(struct rte_mbuf *mb, uint8_t is_ipv4, uint64_t txof_msk, + uint64_t txof_val) { /* reset mbuf metatdata: L2/L3 len, packet type */ mb->packet_type = RTE_PTYPE_UNKNOWN; @@ -567,6 +568,14 @@ tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val) /* clear the PKT_RX_SEC_OFFLOAD flag if set */ mb->ol_flags &= ~PKT_RX_SEC_OFFLOAD; + + if (is_ipv4) { + mb->l3_len = sizeof(struct rte_ipv4_hdr); + mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM); + } else { + mb->l3_len = sizeof(struct rte_ipv6_hdr); + mb->ol_flags |= PKT_TX_IPV6; + } } /* @@ -618,8 +627,12 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], update_tun_inb_l3hdr(sa, outh, inh); /* update mbuf's metadata */ - tun_process_step3(mb[i], sa->tx_offload.msk, + tun_process_step3(mb[i], + (sa->type & RTE_IPSEC_SATP_IPV_MASK) == + RTE_IPSEC_SATP_IPV4 ? 
1 : 0, + sa->tx_offload.msk, sa->tx_offload.val); + k++; } else dr[i - k] = i; diff --git a/lib/ipsec/esp_outb.c b/lib/ipsec/esp_outb.c index 8a6d09558f..d8e261e6fb 100644 --- a/lib/ipsec/esp_outb.c +++ b/lib/ipsec/esp_outb.c @@ -19,7 +19,7 @@ typedef int32_t (*esp_outb_prepare_t)(struct rte_ipsec_sa *sa, rte_be64_t sqc, const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb, - union sym_op_data *icv, uint8_t sqh_len); + union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto); /* * helper function to fill crypto_sym op for cipher+auth algorithms. @@ -140,9 +140,9 @@ outb_cop_prepare(struct rte_crypto_op *cop, static inline int32_t outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb, - union sym_op_data *icv, uint8_t sqh_len) + union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto) { - uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen; + uint32_t clen, hlen, l2len, l3len, pdlen, pdofs, plen, tlen; struct rte_mbuf *ml; struct rte_esp_hdr *esph; struct rte_esp_tail *espt; @@ -154,6 +154,8 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, /* size of ipsec protected data */ l2len = mb->l2_len; + l3len = mb->l3_len; + plen = mb->pkt_len - l2len; /* number of bytes to encrypt */ @@ -190,8 +192,26 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs); /* update pkt l2/l3 len */ - mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) | - sa->tx_offload.val; + if (icrypto) { + mb->tx_offload = + (mb->tx_offload & sa->inline_crypto.tx_offload.msk) | + sa->inline_crypto.tx_offload.val; + mb->l3_len = l3len; + + mb->ol_flags |= sa->inline_crypto.tx_ol_flags; + + /* set ip checksum offload for inner */ + if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) + mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM); + else if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) + == RTE_IPSEC_SATP_IPV6) + mb->ol_flags |= 
PKT_TX_IPV6; + } else { + mb->tx_offload = (mb->tx_offload & sa->tx_offload.msk) | + sa->tx_offload.val; + + mb->ol_flags |= sa->tx_ol_flags; + } /* copy tunnel pkt header */ rte_memcpy(ph, sa->hdr, sa->hdr_len); @@ -311,7 +331,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], /* try to update the packet itself */ rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, - sa->sqh_len); + sa->sqh_len, 0); /* success, setup crypto op */ if (rc >= 0) { outb_pkt_xprepare(sa, sqc, &icv); @@ -338,7 +358,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], static inline int32_t outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb, - union sym_op_data *icv, uint8_t sqh_len) + union sym_op_data *icv, uint8_t sqh_len, uint8_t icrypto __rte_unused) { uint8_t np; uint32_t clen, hlen, pdlen, pdofs, plen, tlen, uhlen; @@ -394,10 +414,16 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc, /* shift L2/L3 headers */ insert_esph(ph, ph + hlen, uhlen); + if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV4) + mb->ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM); + else if ((sa->type & RTE_IPSEC_SATP_IPV_MASK) == RTE_IPSEC_SATP_IPV6) + mb->ol_flags |= PKT_TX_IPV6; + /* update ip header fields */ np = update_trs_l34hdrs(sa, ph + l2len, mb->pkt_len - sqh_len, l2len, l3len, IPPROTO_ESP, tso); + /* update spi, seqn and iv */ esph = (struct rte_esp_hdr *)(ph + uhlen); iv = (uint64_t *)(esph + 1); @@ -463,7 +489,7 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], /* try to update the packet itself */ rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, - sa->sqh_len); + sa->sqh_len, 0); /* success, setup crypto op */ if (rc >= 0) { outb_pkt_xprepare(sa, sqc, &icv); @@ -560,7 +586,7 @@ cpu_outb_pkt_prepare(const struct rte_ipsec_session *ss, gen_iv(ivbuf[k], sqc); /* try to update the packet itself */ - rc = 
prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len); + rc = prepare(sa, sqc, ivbuf[k], mb[i], &icv, sa->sqh_len, 0); /* success, proceed with preparations */ if (rc >= 0) { @@ -741,7 +767,7 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss, gen_iv(iv, sqc); /* try to update the packet itself */ - rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0); + rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0, 1); k += (rc >= 0); @@ -808,7 +834,7 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss, gen_iv(iv, sqc); /* try to update the packet itself */ - rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0); + rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0, 0); k += (rc >= 0); diff --git a/lib/ipsec/rte_ipsec_sa.h b/lib/ipsec/rte_ipsec_sa.h index 40d1e70d45..3c36dcaa77 100644 --- a/lib/ipsec/rte_ipsec_sa.h +++ b/lib/ipsec/rte_ipsec_sa.h @@ -38,7 +38,8 @@ struct rte_ipsec_sa_prm { union { struct { uint8_t hdr_len; /**< tunnel header len */ - uint8_t hdr_l3_off; /**< offset for IPv4/IPv6 header */ + uint8_t hdr_l3_off; /**< tunnel l3 header offset */ + uint8_t hdr_l3_len; /**< tunnel l3 header len */ uint8_t next_proto; /**< next header protocol */ const void *hdr; /**< tunnel header template */ } tun; /**< tunnel mode related parameters */ diff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c index 242fdcd461..51f71b30c6 100644 --- a/lib/ipsec/sa.c +++ b/lib/ipsec/sa.c @@ -17,6 +17,8 @@ #define MBUF_MAX_L2_LEN RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t) #define MBUF_MAX_L3_LEN RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t) +#define MBUF_MAX_TSO_LEN RTE_LEN2MASK(RTE_MBUF_TSO_SEGSZ_BITS, uint64_t) +#define MBUF_MAX_OL3_LEN RTE_LEN2MASK(RTE_MBUF_OUTL3_LEN_BITS, uint64_t) /* some helper structures */ struct crypto_xform { @@ -348,6 +350,11 @@ esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen, uint64_t sqn) sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset; sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + 
sa->ctp.auth.length) - (sa->ctp.cipher.offset + sa->ctp.cipher.length); + + if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) + sa->tx_ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM); + else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6) + sa->tx_ol_flags |= PKT_TX_IPV6; } /* @@ -362,9 +369,43 @@ esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm) sa->hdr_len = prm->tun.hdr_len; sa->hdr_l3_off = prm->tun.hdr_l3_off; + + /* update l2_len and l3_len fields for outbound mbuf */ + sa->inline_crypto.tx_offload.val = rte_mbuf_tx_offload( + 0, /* iL2_LEN */ + 0, /* iL3_LEN */ + 0, /* iL4_LEN */ + 0, /* TSO_SEG_SZ */ + prm->tun.hdr_l3_len, /* oL3_LEN */ + prm->tun.hdr_l3_off, /* oL2_LEN */ + 0); + + sa->inline_crypto.tx_ol_flags |= PKT_TX_TUNNEL_ESP; + + if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) + sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IPV4; + else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6) + sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IPV6; + + if (sa->inline_crypto.tx_ol_flags & PKT_TX_OUTER_IPV4) + sa->inline_crypto.tx_ol_flags |= PKT_TX_OUTER_IP_CKSUM; + if (sa->tx_ol_flags & PKT_TX_IPV4) + sa->inline_crypto.tx_ol_flags |= PKT_TX_IP_CKSUM; + /* update l2_len and l3_len fields for outbound mbuf */ - sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off, - sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0); + sa->tx_offload.val = rte_mbuf_tx_offload( + prm->tun.hdr_l3_off, /* iL2_LEN */ + prm->tun.hdr_l3_len, /* iL3_LEN */ + 0, /* iL4_LEN */ + 0, /* TSO_SEG_SZ */ + 0, /* oL3_LEN */ + 0, /* oL2_LEN */ + 0); + + if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) + sa->tx_ol_flags |= (PKT_TX_IPV4 | PKT_TX_IP_CKSUM); + else if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV6) + sa->tx_ol_flags |= PKT_TX_IPV6; memcpy(sa->hdr, prm->tun.hdr, sa->hdr_len); @@ -473,6 +514,10 @@ esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm, sa->salt = prm->ipsec_xform.salt; /* preserve all values except l2_len and l3_len */ + sa->inline_crypto.tx_offload.msk = + 
~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN, + 0, 0, MBUF_MAX_OL3_LEN, 0, 0); + sa->tx_offload.msk = ~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN, 0, 0, 0, 0, 0); diff --git a/lib/ipsec/sa.h b/lib/ipsec/sa.h index b9b7ebec5b..172d094c4b 100644 --- a/lib/ipsec/sa.h +++ b/lib/ipsec/sa.h @@ -101,6 +101,14 @@ struct rte_ipsec_sa { uint64_t msk; uint64_t val; } tx_offload; + uint64_t tx_ol_flags; + struct { + uint64_t tx_ol_flags; + struct { + uint64_t msk; + uint64_t val; + } tx_offload; + } inline_crypto; struct { uint16_t sport; uint16_t dport; -- 2.25.1