From: Radu Nicolau <radu.nicolau@intel.com>
To: Konstantin Ananyev, Bernard Iremonger, Vladimir Medvedkin
Cc: dev@dpdk.org, mdr@ashroe.eu, bruce.richardson@intel.com, roy.fan.zhang@intel.com, hemant.agrawal@nxp.com, gakhil@marvell.com, anoobj@marvell.com, declan.doherty@intel.com, abhijit.sinha@intel.com, daniel.m.buckley@intel.com, marchana@marvell.com, ktejasree@marvell.com, matan@nvidia.com, Radu Nicolau <radu.nicolau@intel.com>
Subject: [dpdk-dev] [PATCH v8 06/10] ipsec: add transmit segmentation offload support
Date: Mon, 11 Oct 2021 12:29:41 +0100
Message-Id: <20211011112945.2876-7-radu.nicolau@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20211011112945.2876-1-radu.nicolau@intel.com>
References: <20210713133542.3550525-1-radu.nicolau@intel.com> <20211011112945.2876-1-radu.nicolau@intel.com>

Add support for transmit segmentation offload (TSO) to the inline crypto
processing mode. This offload is not supported by the other processing
modes, as at a minimum it requires the network interface to support
inline crypto for IPsec.

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
Signed-off-by: Radu Nicolau <radu.nicolau@intel.com>
Signed-off-by: Abhijit Sinha <abhijit.sinha@intel.com>
Signed-off-by: Daniel Martin Buckley <daniel.m.buckley@intel.com>
Acked-by: Fan Zhang <roy.fan.zhang@intel.com>
---
 doc/guides/prog_guide/ipsec_lib.rst    |   2 +
 doc/guides/rel_notes/release_21_11.rst |   1 +
 lib/ipsec/esp_outb.c                   | 119 +++++++++++++++++++++----
 3 files changed, 103 insertions(+), 19 deletions(-)

diff --git a/doc/guides/prog_guide/ipsec_lib.rst b/doc/guides/prog_guide/ipsec_lib.rst
index af51ff8131..fc0af5eadb 100644
--- a/doc/guides/prog_guide/ipsec_lib.rst
+++ b/doc/guides/prog_guide/ipsec_lib.rst
@@ -315,6 +315,8 @@ Supported features
 
 * NAT-T / UDP encapsulated ESP.
 
+* TSO support (only for inline crypto mode)
+
 * algorithms: 3DES-CBC, AES-CBC, AES-CTR, AES-GCM, AES_CCM, CHACHA20_POLY1305,
   AES_GMAC, HMAC-SHA1, NULL.
 
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index 73a566eaca..77535ace36 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -138,6 +138,7 @@ New Features
 
   * Added support for AEAD algorithms AES_CCM, CHACHA20_POLY1305 and AES_GMAC.
   * Added support for NAT-T / UDP encapsulated ESP
+  * Added TSO offload support; only supported for inline crypto mode.
 
 
 Removed Items
diff --git a/lib/ipsec/esp_outb.c b/lib/ipsec/esp_outb.c
index 0e3314b358..df7d3e8645 100644
--- a/lib/ipsec/esp_outb.c
+++ b/lib/ipsec/esp_outb.c
@@ -147,6 +147,7 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	struct rte_esp_tail *espt;
 	char *ph, *pt;
 	uint64_t *iv;
+	uint8_t tso = !!(mb->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG));
 
 	/* calculate extra header space required */
 	hlen = sa->hdr_len + sa->iv_len + sizeof(*esph);
@@ -157,11 +158,20 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 
 	/* number of bytes to encrypt */
 	clen = plen + sizeof(*espt);
-	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+	/* We don't need to pad/align the packet when using TSO offload */
+	if (likely(!tso))
+		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
 	/* pad length + esp tail */
 	pdlen = clen - plen;
-	tlen = pdlen + sa->icv_len + sqh_len;
+
+	/* We don't append the ICV length when using TSO offload */
+	if (likely(!tso))
+		tlen = pdlen + sa->icv_len + sqh_len;
+	else
+		tlen = pdlen + sqh_len;
 
 	/* do append and prepend */
 	ml = rte_pktmbuf_lastseg(mb);
@@ -346,6 +356,7 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 	char *ph, *pt;
 	uint64_t *iv;
 	uint32_t l2len, l3len;
+	uint8_t tso = !!(mb->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG));
 
 	l2len = mb->l2_len;
 	l3len = mb->l3_len;
@@ -358,11 +369,19 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
 
 	/* number of bytes to encrypt */
 	clen = plen + sizeof(*espt);
-	clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
+
+	/* We don't need to pad/align the packet when using TSO offload */
+	if (likely(!tso))
+		clen = RTE_ALIGN_CEIL(clen, sa->pad_align);
 
 	/* pad length + esp tail */
 	pdlen = clen - plen;
-	tlen = pdlen + sa->icv_len + sqh_len;
+
+	/* We don't append the ICV length when using TSO offload */
+	if (likely(!tso))
+		tlen = pdlen + sa->icv_len + sqh_len;
+	else
+		tlen = pdlen + sqh_len;
 
 	/* do append and insert */
 	ml = rte_pktmbuf_lastseg(mb);
@@ -660,6 +679,29 @@ inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss,
 	}
 }
 
+/* check if packet will exceed MSS and segmentation is required */
+static inline int
+esn_outb_nb_segments(struct rte_mbuf *m)
+{
+	uint16_t segments = 1;
+	uint16_t pkt_l3len = m->pkt_len - m->l2_len;
+
+	/* Only support segmentation for UDP/TCP flows */
+	if (!(m->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)))
+		return segments;
+
+	if (m->tso_segsz > 0 && pkt_l3len > m->tso_segsz) {
+		segments = pkt_l3len / m->tso_segsz;
+		if (segments * m->tso_segsz < pkt_l3len)
+			segments++;
+		if (m->packet_type & RTE_PTYPE_L4_TCP)
+			m->ol_flags |= (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM);
+		else
+			m->ol_flags |= (PKT_TX_UDP_SEG | PKT_TX_UDP_CKSUM);
+	}
+	return segments;
+}
+
 /*
  * process group of ESP outbound tunnel packets destined for
  * INLINE_CRYPTO type of device.
@@ -669,24 +711,36 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num)
 {
 	int32_t rc;
-	uint32_t i, k, n;
+	uint32_t i, k, nb_sqn = 0, nb_sqn_alloc;
 	uint64_t sqn;
 	rte_be64_t sqc;
 	struct rte_ipsec_sa *sa;
 	union sym_op_data icv;
 	uint64_t iv[IPSEC_MAX_IV_QWORD];
 	uint32_t dr[num];
+	uint16_t nb_segs[num];
 
 	sa = ss->sa;
 
-	n = num;
-	sqn = esn_outb_update_sqn(sa, &n);
-	if (n != num)
+	for (i = 0; i != num; i++) {
+		nb_segs[i] = esn_outb_nb_segments(mb[i]);
+		nb_sqn += nb_segs[i];
+		/* setup offload fields for TSO */
+		if (nb_segs[i] > 1) {
+			mb[i]->ol_flags |= (PKT_TX_OUTER_IPV4 |
+					PKT_TX_OUTER_IP_CKSUM |
+					PKT_TX_TUNNEL_ESP);
+			mb[i]->outer_l3_len = mb[i]->l3_len;
+		}
+	}
+
+	nb_sqn_alloc = nb_sqn;
+	sqn = esn_outb_update_sqn(sa, &nb_sqn_alloc);
+	if (nb_sqn_alloc != nb_sqn)
 		rte_errno = EOVERFLOW;
 
 	k = 0;
-	for (i = 0; i != n; i++) {
-
+	for (i = 0; i != num; i++) {
 		sqc = rte_cpu_to_be_64(sqn + i);
 		gen_iv(iv, sqc);
 
@@ -700,11 +754,18 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
 			dr[i - k] = i;
 			rte_errno = -rc;
 		}
+
+		/*
+		 * If the packet is using TSO, increment sqn by the number
+		 * of segments for the packet.
+		 */
+		if (mb[i]->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+			sqn += nb_segs[i] - 1;
 	}
 
 	/* copy not processed mbufs beyond good ones */
-	if (k != n && k != 0)
-		move_bad_mbufs(mb, dr, n, n - k);
+	if (k != num && k != 0)
+		move_bad_mbufs(mb, dr, num, num - k);
 
 	inline_outb_mbuf_prepare(ss, mb, k);
 	return k;
@@ -719,23 +780,36 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
 	struct rte_mbuf *mb[], uint16_t num)
 {
 	int32_t rc;
-	uint32_t i, k, n;
+	uint32_t i, k, nb_sqn, nb_sqn_alloc;
 	uint64_t sqn;
 	rte_be64_t sqc;
 	struct rte_ipsec_sa *sa;
 	union sym_op_data icv;
 	uint64_t iv[IPSEC_MAX_IV_QWORD];
 	uint32_t dr[num];
+	uint16_t nb_segs[num];
 
 	sa = ss->sa;
 
-	n = num;
-	sqn = esn_outb_update_sqn(sa, &n);
-	if (n != num)
+	/* Calculate number of sequence numbers required */
+	for (i = 0, nb_sqn = 0; i != num; i++) {
+		nb_segs[i] = esn_outb_nb_segments(mb[i]);
+		nb_sqn += nb_segs[i];
+		/* setup offload fields for TSO */
+		if (nb_segs[i] > 1) {
+			mb[i]->ol_flags |= (PKT_TX_OUTER_IPV4 |
+					PKT_TX_OUTER_IP_CKSUM);
+			mb[i]->outer_l3_len = mb[i]->l3_len;
+		}
+	}
+
+	nb_sqn_alloc = nb_sqn;
+	sqn = esn_outb_update_sqn(sa, &nb_sqn_alloc);
+	if (nb_sqn_alloc != nb_sqn)
 		rte_errno = EOVERFLOW;
 
 	k = 0;
-	for (i = 0; i != n; i++) {
+	for (i = 0; i != num; i++) {
 		sqc = rte_cpu_to_be_64(sqn + i);
 		gen_iv(iv, sqc);
 
@@ -750,11 +824,18 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
 			dr[i - k] = i;
 			rte_errno = -rc;
 		}
+
+		/*
+		 * If the packet is using TSO, increment sqn by the number
+		 * of segments for the packet.
+		 */
+		if (mb[i]->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG))
+			sqn += nb_segs[i] - 1;
 	}
 
 	/* copy not processed mbufs beyond good ones */
-	if (k != n && k != 0)
-		move_bad_mbufs(mb, dr, n, n - k);
+	if (k != num && k != 0)
+		move_bad_mbufs(mb, dr, num, num - k);
 
 	inline_outb_mbuf_prepare(ss, mb, k);
 	return k;
-- 
2.25.1
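
Usage note: a minimal sketch, not part of the patch, of how an application
might tag an outbound IPv4/TCP mbuf so that esn_outb_nb_segments() above
requests segmentation for it. The helper name, MSS value and header sizes
are illustrative assumptions, not library API.

  #include <rte_ether.h>
  #include <rte_ip.h>
  #include <rte_tcp.h>
  #include <rte_mbuf.h>

  /* hypothetical application helper; field values are examples only */
  static void
  app_mark_pkt_for_tso(struct rte_mbuf *m, uint16_t mss)
  {
  	/* the library keys off packet_type and tso_segsz */
  	m->packet_type |= RTE_PTYPE_L4_TCP;
  	m->tso_segsz = mss;

  	/* header lengths describe the plain-text packet layout
  	 * (plain Ethernet/IPv4/TCP, no options assumed) */
  	m->l2_len = RTE_ETHER_HDR_LEN;
  	m->l3_len = sizeof(struct rte_ipv4_hdr);
  	m->l4_len = sizeof(struct rte_tcp_hdr);
  }

Such a packet would then go through rte_ipsec_pkt_process() on an
RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO session and out via
rte_eth_tx_burst(); the library sets PKT_TX_TCP_SEG itself when the
payload exceeds tso_segsz.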