From mboxrd@z Thu Jan 1 00:00:00 1970
From: Konstantin Ananyev
To: dev@dpdk.org
Cc: Konstantin Ananyev , Mohammad Abdul Awal
Date: Tue, 9 Oct 2018 19:23:37 +0100
Message-Id: <1539109420-13412-7-git-send-email-konstantin.ananyev@intel.com>
X-Mailer: git-send-email 1.7.0.7
In-Reply-To: <1535129598-27301-1-git-send-email-konstantin.ananyev@intel.com>
References: <1535129598-27301-1-git-send-email-konstantin.ananyev@intel.com>
Subject: [dpdk-dev] [RFC v2 6/9] ipsec: implement SA data-path API
List-Id: DPDK patches and discussions

Provide an implementation of rte_ipsec_crypto_prepare() and
rte_ipsec_process().

Current implementation:
 - supports ESP protocol tunnel mode only.
 - supports ESN and replay window.
 - supports algorithms: AES-CBC, AES-GCM, HMAC-SHA1, NULL.
 - covers all currently defined security session types:
   - RTE_SECURITY_ACTION_TYPE_NONE
   - RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO
   - RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
   - RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL

For the first two types the SQN check/update is done in SW (inside the
library); for the last two types it is the HW/PMD responsibility.

Signed-off-by: Mohammad Abdul Awal
Signed-off-by: Konstantin Ananyev
---
 lib/librte_ipsec/crypto.h    |  74 +++++
 lib/librte_ipsec/ipsec_sqn.h | 144 ++++++++-
 lib/librte_ipsec/pad.h       |  45 +++
 lib/librte_ipsec/sa.c        | 681 ++++++++++++++++++++++++++++++++++++++++---
 4 files changed, 909 insertions(+), 35 deletions(-)
 create mode 100644 lib/librte_ipsec/crypto.h
 create mode 100644 lib/librte_ipsec/pad.h

diff --git a/lib/librte_ipsec/crypto.h b/lib/librte_ipsec/crypto.h
new file mode 100644
index 000000000..6ff995c59
--- /dev/null
+++ b/lib/librte_ipsec/crypto.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _CRYPTO_H_
+#define _CRYPTO_H_
+
+/**
+ * @file crypto.h
+ * Contains crypto specific functions/structures/macros used internally
+ * by ipsec library.
+ */
+
+ /*
+ * AES-GCM devices have some specific requirements for IV and AAD formats.
+ * Ideally that would be handled by the driver itself.
+ */
+
+struct aead_gcm_iv {
+	uint32_t salt;
+	uint64_t iv;
+	uint32_t cnt;
+} __attribute__((packed));
+
+struct aead_gcm_aad {
+	uint32_t spi;
+	/*
+	 * RFC 4106, section 5:
+	 * Two formats of the AAD are defined:
+	 * one for 32-bit sequence numbers, and one for 64-bit ESN.
+ */ + union { + uint32_t u32; + uint64_t u64; + } sqn; + uint32_t align0; /* align to 16B boundary */ +} __attribute__((packed)); + +struct gcm_esph_iv { + struct esp_hdr esph; + uint64_t iv; +} __attribute__((packed)); + + +static inline void +aead_gcm_iv_fill(struct aead_gcm_iv *gcm, uint64_t iv, uint32_t salt) +{ + gcm->salt = salt; + gcm->iv = iv; + gcm->cnt = rte_cpu_to_be_32(1); +} + +/* + * RFC 4106, 5 AAD Construction + */ +static inline void +aead_gcm_aad_fill(struct aead_gcm_aad *aad, const struct gcm_esph_iv *hiv, + int esn) +{ + aad->spi = hiv->esph.spi; + if (esn) + aad->sqn.u64 = hiv->iv; + else + aad->sqn.u32 = hiv->esph.seq; +} + +static inline void +gen_iv(uint64_t iv[IPSEC_MAX_IV_QWORD], uint64_t sqn) +{ + iv[0] = rte_cpu_to_be_64(sqn); + iv[1] = 0; +} + +#endif /* _CRYPTO_H_ */ diff --git a/lib/librte_ipsec/ipsec_sqn.h b/lib/librte_ipsec/ipsec_sqn.h index d0d122824..7477b8d59 100644 --- a/lib/librte_ipsec/ipsec_sqn.h +++ b/lib/librte_ipsec/ipsec_sqn.h @@ -15,7 +15,7 @@ #define IS_ESN(sa) ((sa)->sqn_mask == UINT64_MAX) -/** +/* * for given size, calculate required number of buckets. */ static uint32_t @@ -30,6 +30,148 @@ replay_num_bucket(uint32_t wsz) return nb; } +/* + * According to RFC4303 A2.1, determine the high-order bit of sequence number. + * use 32bit arithmetic inside, return uint64_t. + */ +static inline uint64_t +reconstruct_esn(uint64_t t, uint32_t sqn, uint32_t w) +{ + uint32_t th, tl, bl; + + tl = t; + th = t >> 32; + bl = tl - w + 1; + + /* case A: window is within one sequence number subspace */ + if (tl >= (w - 1)) + th += (sqn < bl); + /* case B: window spans two sequence number subspaces */ + else if (th != 0) + th -= (sqn >= bl); + + /* return constructed sequence with proper high-order bits */ + return (uint64_t)th << 32 | sqn; +} + +/** + * Perform the replay checking. + * + * struct rte_ipsec_sa contains the window and window related parameters, + * such as the window size, bitmask, and the last acknowledged sequence number. + * + * Based on RFC 6479. + * Blocks are 64 bits unsigned integers + */ +static int32_t +esn_inb_check_sqn(const struct replay_sqn *rsn, const struct rte_ipsec_sa *sa, + uint64_t sqn) +{ + uint32_t bit, bucket; + + /* seq not valid (first or wrapped) */ + if (sqn == 0) + return -EINVAL; + + /* replay not enabled */ + if (sa->replay.win_sz == 0) + return 0; + + /* handle ESN */ + if (IS_ESN(sa)) + sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz); + + /* seq is larger than lastseq */ + if (sqn > rsn->sqn) + return 0; + + /* seq is outside window */ + if ((sqn + sa->replay.win_sz) < rsn->sqn) + return -EINVAL; + + /* seq is inside the window */ + bit = sqn & WINDOW_BIT_LOC_MASK; + bucket = (sqn >> WINDOW_BUCKET_BITS) & sa->replay.bucket_index_mask; + + /* already seen packet */ + if (rsn->window[bucket] & ((uint64_t)1 << bit)) + return -EINVAL; + + return 0; +} + +/** + * For outbound SA perform the sequence number update. + */ +static inline uint64_t +esn_outb_update_sqn(struct rte_ipsec_sa *sa, uint32_t *num) +{ + uint64_t n, s, sqn; + + n = *num; + sqn = sa->sqn.outb + n; + sa->sqn.outb = sqn; + + /* overflow */ + if (sqn > sa->sqn_mask) { + s = sqn - sa->sqn_mask; + *num = (s < n) ? n - s : 0; + } + + return sqn - n; +} + +/** + * For inbound SA perform the sequence number and replay window update. 
+ */ +static inline int32_t +esn_inb_update_sqn(struct replay_sqn *rsn, const struct rte_ipsec_sa *sa, + uint64_t sqn) +{ + uint32_t bit, bucket, last_bucket, new_bucket, diff, i; + + /* replay not enabled */ + if (sa->replay.win_sz == 0) + return 0; + + /* handle ESN */ + if (IS_ESN(sa)) + sqn = reconstruct_esn(rsn->sqn, sqn, sa->replay.win_sz); + + /* seq is outside window*/ + if ((sqn + sa->replay.win_sz) < rsn->sqn) + return -EINVAL; + + /* update the bit */ + bucket = (sqn >> WINDOW_BUCKET_BITS); + + /* check if the seq is within the range */ + if (sqn > rsn->sqn) { + last_bucket = rsn->sqn >> WINDOW_BUCKET_BITS; + diff = bucket - last_bucket; + /* seq is way after the range of WINDOW_SIZE */ + if (diff > sa->replay.nb_bucket) + diff = sa->replay.nb_bucket; + + for (i = 0; i != diff; i++) { + new_bucket = (i + last_bucket + 1) & + sa->replay.bucket_index_mask; + rsn->window[new_bucket] = 0; + } + rsn->sqn = sqn; + } + + bucket &= sa->replay.bucket_index_mask; + bit = (uint64_t)1 << (sqn & WINDOW_BIT_LOC_MASK); + + /* already seen packet */ + if (rsn->window[bucket] & bit) + return -EINVAL; + + rsn->window[bucket] |= bit; + return 0; +} + /** * Based on number of buckets calculated required size for the * structure that holds replay window and sequnce number (RSN) information. diff --git a/lib/librte_ipsec/pad.h b/lib/librte_ipsec/pad.h new file mode 100644 index 000000000..2f5ccd00e --- /dev/null +++ b/lib/librte_ipsec/pad.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _PAD_H_ +#define _PAD_H_ + +#define IPSEC_MAX_PAD_SIZE UINT8_MAX + +static const uint8_t esp_pad_bytes[IPSEC_MAX_PAD_SIZE] = { + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, + 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, + 113, 114, 115, 116, 117, 118, 119, 120, + 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, + 137, 138, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 192, + 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 229, 230, 231, 232, + 233, 234, 235, 236, 237, 238, 239, 240, + 241, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 251, 252, 253, 254, 255, +}; + +#endif /* _PAD_H_ */ diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c index ad2aa29df..ae8ce4f24 100644 --- a/lib/librte_ipsec/sa.c +++ b/lib/librte_ipsec/sa.c @@ -6,9 +6,12 @@ #include #include #include +#include #include "sa.h" #include "ipsec_sqn.h" +#include "crypto.h" +#include "pad.h" /* some helper structures */ struct crypto_xform { @@ -174,11 +177,13 @@ esp_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm, /* RFC 4106 */ if (cxf->aead->algo != RTE_CRYPTO_AEAD_AES_GCM) return -EINVAL; + sa->aad_len = sizeof(struct aead_gcm_aad); sa->icv_len = cxf->aead->digest_length; 
sa->iv_ofs = cxf->aead->iv.offset; sa->iv_len = sizeof(uint64_t); sa->pad_align = 4; } else { + sa->aad_len = 0; sa->icv_len = cxf->auth->digest_length; sa->iv_ofs = cxf->cipher->iv.offset; if (cxf->cipher->algo == RTE_CRYPTO_CIPHER_NULL) { @@ -191,7 +196,6 @@ esp_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm, return -EINVAL; } - sa->aad_len = 0; sa->udata = prm->userdata; sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi); sa->salt = prm->ipsec_xform.salt; @@ -281,72 +285,681 @@ rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm, return sz; } +static inline void +esp_outb_tun_cop_prepare(struct rte_crypto_op *cop, + const struct rte_ipsec_sa *sa, const uint64_t ivp[IPSEC_MAX_IV_QWORD], + const union sym_op_data *icv, uint32_t plen) +{ + struct rte_crypto_sym_op *sop; + struct aead_gcm_iv *gcm; + + /* fill sym op fields */ + sop = cop->sym; + + /* AEAD (AES_GCM) case */ + if (sa->aad_len != 0) { + sop->aead.data.offset = sa->ctp.cipher.offset; + sop->aead.data.length = sa->ctp.cipher.length + plen; + sop->aead.digest.data = icv->va; + sop->aead.digest.phys_addr = icv->pa; + sop->aead.aad.data = icv->va + sa->icv_len; + sop->aead.aad.phys_addr = icv->pa + sa->icv_len; + + /* fill AAD IV (located inside crypto op) */ + gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *, + sa->iv_ofs); + aead_gcm_iv_fill(gcm, ivp[0], sa->salt); + /* CRYPT+AUTH case */ + } else { + sop->cipher.data.offset = sa->ctp.cipher.offset; + sop->cipher.data.length = sa->ctp.cipher.length + plen; + sop->auth.data.offset = sa->ctp.auth.offset; + sop->auth.data.length = sa->ctp.auth.length + plen; + sop->auth.digest.data = icv->va; + sop->auth.digest.phys_addr = icv->pa; + } +} + +static inline int32_t +esp_outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, uint64_t sqn, + const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb, + union sym_op_data *icv) +{ + uint32_t clen, hlen, pdlen, pdofs, tlen; + struct rte_mbuf *ml; + struct esp_hdr *esph; + struct esp_tail *espt; + struct aead_gcm_aad *aad; + char *ph, *pt; + uint64_t *iv; + + /* calculate extra header space required */ + hlen = sa->hdr_len + sa->iv_len + sizeof(*esph); + + /* number of bytes to encrypt */ + clen = mb->pkt_len + sizeof(*espt); + clen = RTE_ALIGN_CEIL(clen, sa->pad_align); + + /* pad length + esp tail */ + pdlen = clen - mb->pkt_len; + tlen = pdlen + sa->icv_len; + + /* do append and prepend */ + ml = rte_pktmbuf_lastseg(mb); + if (tlen + sa->aad_len > rte_pktmbuf_tailroom(ml)) + return -ENOSPC; + + /* prepend header */ + ph = rte_pktmbuf_prepend(mb, hlen); + if (ph == NULL) + return -ENOSPC; + + /* append tail */ + pdofs = ml->data_len; + ml->data_len += tlen; + mb->pkt_len += tlen; + pt = rte_pktmbuf_mtod_offset(ml, typeof(pt), pdofs); + + /* update pkt l2/l3 len */ + mb->l2_len = sa->hdr_l3_off; + mb->l3_len = sa->hdr_len - sa->hdr_l3_off; + + /* copy tunnel pkt header */ + rte_memcpy(ph, sa->hdr, sa->hdr_len); + + /* update original and new ip header fields */ + if (sa->type & RTE_IPSEC_SATP_MODE_TUNLV4) { + struct ipv4_hdr *l3h; + l3h = (struct ipv4_hdr *)(ph + sa->hdr_l3_off); + l3h->packet_id = rte_cpu_to_be_16(sqn); + l3h->total_length = rte_cpu_to_be_16(mb->pkt_len - + sa->hdr_l3_off); + } else { + struct ipv6_hdr *l3h; + l3h = (struct ipv6_hdr *)(ph + sa->hdr_l3_off); + l3h->payload_len = rte_cpu_to_be_16(mb->pkt_len - + sa->hdr_l3_off - sizeof(*l3h)); + } + + /* update spi, seqn and iv */ + esph = (struct esp_hdr *)(ph + sa->hdr_len); + iv = (uint64_t *)(esph + 1); + rte_memcpy(iv, 
ivp, sa->iv_len); + + esph->spi = sa->spi; + esph->seq = rte_cpu_to_be_32(sqn); + + /* offset for ICV */ + pdofs += pdlen; + + /* pad length */ + pdlen -= sizeof(*espt); + + /* copy padding data */ + rte_memcpy(pt, esp_pad_bytes, pdlen); + + /* update esp trailer */ + espt = (struct esp_tail *)(pt + pdlen); + espt->pad_len = pdlen; + espt->next_proto = sa->proto; + + icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs); + icv->pa = rte_pktmbuf_iova_offset(ml, pdofs); + + /* + * fill IV and AAD fields, if any (aad fields are placed after icv), + * right now we support only one AEAD algorithm: AES-GCM . + */ + if (sa->aad_len != 0) { + aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len); + aead_gcm_aad_fill(aad, (const struct gcm_esph_iv *)esph, + IS_ESN(sa)); + } + + return clen; +} + +static inline uint16_t +esp_outb_tun_prepare(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], + struct rte_crypto_op *cop[], struct rte_mbuf *dr[], uint16_t num) +{ + int32_t rc; + uint32_t i, k, n; + uint64_t sqn; + union sym_op_data icv; + uint64_t iv[IPSEC_MAX_IV_QWORD]; + + n = num; + sqn = esn_outb_update_sqn(sa, &n); + if (n != num) + rte_errno = EOVERFLOW; + + k = 0; + for (i = 0; i != n; i++) { + + gen_iv(iv, sqn + i); + + /* try to update the packet itself */ + rc = esp_outb_tun_pkt_prepare(sa, sqn + i, iv, mb[i], &icv); + + /* success, setup crypto op */ + if (rc >= 0) { + mb[k] = mb[i]; + esp_outb_tun_cop_prepare(cop[k], sa, iv, &icv, rc); + k++; + /* failure, put packet into the death-row */ + } else { + dr[i - k] = mb[i]; + rte_errno = -rc; + } + } + + return k; +} + +static inline int32_t +esp_inb_tun_cop_prepare(struct rte_crypto_op *cop, + const struct rte_ipsec_sa *sa, struct rte_mbuf *mb, + const union sym_op_data *icv, uint32_t pofs, uint32_t plen) +{ + struct rte_crypto_sym_op *sop; + struct aead_gcm_iv *gcm; + uint64_t *ivc, *ivp; + uint32_t clen; + + clen = plen - sa->ctp.cipher.length; + if ((int32_t)clen < 0 || (clen & (sa->pad_align - 1)) != 0) + return -EINVAL; + + /* fill sym op fields */ + sop = cop->sym; + + /* AEAD (AES_GCM) case */ + if (sa->aad_len != 0) { + sop->aead.data.offset = pofs + sa->ctp.cipher.offset; + sop->aead.data.length = clen; + sop->aead.digest.data = icv->va; + sop->aead.digest.phys_addr = icv->pa; + sop->aead.aad.data = icv->va + sa->icv_len; + sop->aead.aad.phys_addr = icv->pa + sa->icv_len; + + /* fill AAD IV (located inside crypto op) */ + gcm = rte_crypto_op_ctod_offset(cop, struct aead_gcm_iv *, + sa->iv_ofs); + ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *, + pofs + sizeof(struct esp_hdr)); + aead_gcm_iv_fill(gcm, ivp[0], sa->salt); + /* CRYPT+AUTH case */ + } else { + sop->cipher.data.offset = pofs + sa->ctp.cipher.offset; + sop->cipher.data.length = clen; + sop->auth.data.offset = pofs + sa->ctp.auth.offset; + sop->auth.data.length = plen - sa->ctp.auth.length; + sop->auth.digest.data = icv->va; + sop->auth.digest.phys_addr = icv->pa; + + /* copy iv from the input packet to the cop */ + ivc = rte_crypto_op_ctod_offset(cop, uint64_t *, sa->iv_ofs); + ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *, + pofs + sizeof(struct esp_hdr)); + rte_memcpy(ivc, ivp, sa->iv_len); + } + return 0; +} + +static inline int32_t +esp_inb_tun_pkt_prepare(const struct rte_ipsec_sa *sa, + const struct replay_sqn *rsn, struct rte_mbuf *mb, + uint32_t hlen, union sym_op_data *icv) +{ + int32_t rc; + uint32_t icv_ofs, plen, sqn; + struct rte_mbuf *ml; + struct esp_hdr *esph; + struct aead_gcm_aad *aad; + + esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen); + sqn = 
rte_be_to_cpu_32(esph->seq); + rc = esn_inb_check_sqn(rsn, sa, sqn); + if (rc != 0) + return rc; + + plen = mb->pkt_len; + plen = plen - hlen; + + ml = rte_pktmbuf_lastseg(mb); + icv_ofs = ml->data_len - sa->icv_len; + + /* we have to allocate space for AAD somewhere, + * right now - just use free trailing space at the last segment. + * Would probably be more convenient to reserve space for AAD + * inside rte_crypto_op itself + * (again for IV space is already reserved inside cop). + */ + if (sa->aad_len > rte_pktmbuf_tailroom(ml)) + return -ENOSPC; + + icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs); + icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs); + + /* + * fill AAD fields, if any (aad fields are placed after icv), + * right now we support only one AEAD algorithm: AES-GCM. + */ + if (sa->aad_len != 0) { + aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len); + aead_gcm_aad_fill(aad, (const struct gcm_esph_iv *)esph, + IS_ESN(sa)); + } + + return plen; +} + +static inline uint16_t +esp_inb_tun_prepare(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], + struct rte_crypto_op *cop[], struct rte_mbuf *dr[], uint16_t num) +{ + int32_t rc; + uint32_t i, k, hl; + struct replay_sqn *rsn; + union sym_op_data icv; + + rsn = sa->sqn.inb; + + k = 0; + for (i = 0; i != num; i++) { + + hl = mb[i]->l2_len + mb[i]->l3_len; + rc = esp_inb_tun_pkt_prepare(sa, rsn, mb[i], hl, &icv); + if (rc >= 0) + rc = esp_inb_tun_cop_prepare(cop[k], sa, mb[i], &icv, + hl, rc); + + if (rc == 0) + mb[k++] = mb[i]; + else { + dr[i - k] = mb[i]; + rte_errno = -rc; + } + } + + return k; +} + +static inline void +mbuf_bulk_copy(struct rte_mbuf *dst[], struct rte_mbuf * const src[], + uint32_t num) +{ + uint32_t i; + + for (i = 0; i != num; i++) + dst[i] = src[i]; +} + +static inline void +lksd_none_cop_prepare(const struct rte_ipsec_session *ss, + struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num) +{ + uint32_t i; + struct rte_crypto_sym_op *sop; + + for (i = 0; i != num; i++) { + sop = cop[i]->sym; + cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + cop[i]->sess_type = RTE_CRYPTO_OP_WITH_SESSION; + sop->m_src = mb[i]; + __rte_crypto_sym_op_attach_sym_session(sop, ss->crypto.ses); + } +} + static uint16_t lksd_none_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num) { - RTE_SET_USED(ss); - RTE_SET_USED(mb); - RTE_SET_USED(cop); - RTE_SET_USED(num); - rte_errno = ENOTSUP; - return 0; + uint32_t n; + struct rte_ipsec_sa *sa; + struct rte_mbuf *dr[num]; + + static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK | + RTE_IPSEC_SATP_MODE_MASK; + + sa = ss->sa; + + switch (sa->type & msk) { + case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4): + case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6): + n = esp_inb_tun_prepare(sa, mb, cop, dr, num); + lksd_none_cop_prepare(ss, mb, cop, n); + break; + case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4): + case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6): + n = esp_outb_tun_prepare(sa, mb, cop, dr, num); + lksd_none_cop_prepare(ss, mb, cop, n); + break; + default: + rte_errno = ENOTSUP; + n = 0; + } + + /* copy not prepared mbufs beyond good ones */ + if (n != num && n != 0) + mbuf_bulk_copy(mb + n, dr, num - n); + + return n; +} + +static inline void +lksd_proto_cop_prepare(const struct rte_ipsec_session *ss, + struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num) +{ + uint32_t i; + struct rte_crypto_sym_op *sop; + + for 
(i = 0; i != num; i++) { + sop = cop[i]->sym; + cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION; + sop->m_src = mb[i]; + __rte_security_attach_session(sop, ss->security.ses); + } } static uint16_t -lksd_proto_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], - struct rte_crypto_op *cop[], uint16_t num) +lksd_proto_prepare(const struct rte_ipsec_session *ss, + struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num) { - RTE_SET_USED(ss); - RTE_SET_USED(mb); - RTE_SET_USED(cop); - RTE_SET_USED(num); - rte_errno = ENOTSUP; + lksd_proto_cop_prepare(ss, mb, cop, num); + return num; +} + +static inline int +esp_inb_tun_single_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb, + uint32_t *sqn) +{ + uint32_t hlen, icv_len, tlen; + struct esp_hdr *esph; + struct esp_tail *espt; + struct rte_mbuf *ml; + char *pd; + + if (mb->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) + return -EBADMSG; + + icv_len = sa->icv_len; + + ml = rte_pktmbuf_lastseg(mb); + espt = rte_pktmbuf_mtod_offset(ml, struct esp_tail *, + ml->data_len - icv_len - sizeof(*espt)); + + /* cut of ICV, ESP tail and padding bytes */ + tlen = icv_len + sizeof(*espt) + espt->pad_len; + ml->data_len -= tlen; + mb->pkt_len -= tlen; + + /* cut of L2/L3 headers, ESP header and IV */ + hlen = mb->l2_len + mb->l3_len; + esph = rte_pktmbuf_mtod_offset(mb, struct esp_hdr *, hlen); + rte_pktmbuf_adj(mb, hlen + sa->ctp.cipher.offset); + + /* reset mbuf metatdata: L2/L3 len, packet type */ + mb->packet_type = RTE_PTYPE_UNKNOWN; + mb->l2_len = 0; + mb->l3_len = 0; + + /* clear the PKT_RX_SEC_OFFLOAD flag if set */ + mb->ol_flags &= ~(mb->ol_flags & PKT_RX_SEC_OFFLOAD); + + /* + * check padding and next proto. + * return an error if something is wrong. + */ + + pd = (char *)espt - espt->pad_len; + if (espt->next_proto != sa->proto || + memcmp(pd, esp_pad_bytes, espt->pad_len)) + return -EINVAL; + + *sqn = rte_be_to_cpu_32(esph->seq); return 0; } +static inline uint16_t +esp_inb_rsn_update(struct rte_ipsec_sa *sa, const uint32_t sqn[], + struct rte_mbuf *mb[], struct rte_mbuf *dr[], uint16_t num) +{ + uint32_t i, k; + struct replay_sqn *rsn; + + rsn = sa->sqn.inb; + + k = 0; + for (i = 0; i != num; i++) { + if (esn_inb_update_sqn(rsn, sa, sqn[i]) == 0) + mb[k++] = mb[i]; + else + dr[i - k] = mb[i]; + } + + return k; +} + +static inline uint16_t +esp_inb_tun_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], + struct rte_mbuf *dr[], uint16_t num) +{ + uint32_t i, k; + uint32_t sqn[num]; + + /* process packets, extract seq numbers */ + + k = 0; + for (i = 0; i != num; i++) { + /* good packet */ + if (esp_inb_tun_single_pkt_process(sa, mb[i], sqn + k) == 0) + mb[k++] = mb[i]; + /* bad packet, will drop from furhter processing */ + else + dr[i - k] = mb[i]; + } + + /* update seq # and replay winow */ + k = esp_inb_rsn_update(sa, sqn, mb, dr + i - k, k); + + if (k != num) + rte_errno = EBADMSG; + return k; +} + +/* + * helper routine, puts packets with PKT_RX_SEC_OFFLOAD_FAILED set, + * into the death-row. 
+ */ +static inline uint16_t +pkt_flag_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[], + struct rte_mbuf *dr[], uint16_t num) +{ + uint32_t i, k; + + RTE_SET_USED(sa); + + k = 0; + for (i = 0; i != num; i++) { + if ((mb[i]->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) == 0) + mb[k++] = mb[i]; + else + dr[i - k] = mb[i]; + } + + if (k != num) + rte_errno = EBADMSG; + return k; +} + +static inline void +inline_outb_mbuf_prepare(const struct rte_ipsec_session *ss, + struct rte_mbuf *mb[], uint16_t num) +{ + uint32_t i, ol_flags; + + ol_flags = ss->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA; + for (i = 0; i != num; i++) { + + mb[i]->ol_flags |= PKT_TX_SEC_OFFLOAD; + if (ol_flags != 0) + rte_security_set_pkt_metadata(ss->security.ctx, + ss->security.ses, mb[i], NULL); + } +} + +static inline uint16_t +inline_outb_tun_pkt_process(struct rte_ipsec_sa *sa, + struct rte_mbuf *mb[], struct rte_mbuf *dr[], uint16_t num) +{ + int32_t rc; + uint32_t i, k, n; + uint64_t sqn; + union sym_op_data icv; + uint64_t iv[IPSEC_MAX_IV_QWORD]; + + n = num; + sqn = esn_outb_update_sqn(sa, &n); + if (n != num) + rte_errno = EOVERFLOW; + + k = 0; + for (i = 0; i != n; i++) { + + gen_iv(iv, sqn + i); + + /* try to update the packet itself */ + rc = esp_outb_tun_pkt_prepare(sa, sqn + i, iv, mb[i], &icv); + + /* success, update mbuf fields */ + if (rc >= 0) + mb[k++] = mb[i]; + /* failure, put packet into the death-row */ + else { + dr[i - k] = mb[i]; + rte_errno = -rc; + } + } + + return k; +} + static uint16_t lksd_none_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], uint16_t num) { - RTE_SET_USED(ss); - RTE_SET_USED(mb); - RTE_SET_USED(num); - rte_errno = ENOTSUP; - return 0; + uint32_t n; + struct rte_ipsec_sa *sa; + struct rte_mbuf *dr[num]; + + static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK | + RTE_IPSEC_SATP_MODE_MASK; + + sa = ss->sa; + + switch (sa->type & msk) { + case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4): + case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6): + n = esp_inb_tun_pkt_process(sa, mb, dr, num); + break; + case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4): + case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6): + n = pkt_flag_process(sa, mb, dr, num); + break; + default: + n = 0; + rte_errno = ENOTSUP; + } + + /* copy not prepared mbufs beyond good ones */ + if (n != num && n != 0) + mbuf_bulk_copy(mb + n, dr, num - n); + + return n; } static uint16_t inline_crypto_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], uint16_t num) { - RTE_SET_USED(ss); - RTE_SET_USED(mb); - RTE_SET_USED(num); - rte_errno = ENOTSUP; - return 0; + uint32_t n; + struct rte_ipsec_sa *sa; + struct rte_mbuf *dr[num]; + + sa = ss->sa; + + static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK | + RTE_IPSEC_SATP_MODE_MASK; + + switch (sa->type & msk) { + case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4): + case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6): + n = esp_inb_tun_pkt_process(sa, mb, dr, num); + break; + case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4): + case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6): + n = inline_outb_tun_pkt_process(sa, mb, dr, num); + inline_outb_mbuf_prepare(ss, mb, n); + break; + default: + n = 0; + rte_errno = ENOTSUP; + } + + /* copy not processed mbufs beyond good ones */ + if (n != num && n != 0) + mbuf_bulk_copy(mb + n, dr, num - n); + + return n; } static uint16_t inline_proto_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[], uint16_t num) { - 
RTE_SET_USED(ss);
-	RTE_SET_USED(mb);
-	RTE_SET_USED(num);
-	rte_errno = ENOTSUP;
-	return 0;
+	uint32_t n;
+	struct rte_ipsec_sa *sa;
+	struct rte_mbuf *dr[num];
+
+	sa = ss->sa;
+
+	/* outbound, just set flags and metadata */
+	if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_OB) {
+		inline_outb_mbuf_prepare(ss, mb, num);
+		return num;
+	}
+
+	/* inbound, check that HW successfully processed packets */
+	n = pkt_flag_process(sa, mb, dr, num);
+
+	/* copy the bad ones, after the good ones */
+	if (n != num && n != 0)
+		mbuf_bulk_copy(mb + n, dr, num - n);
+	return n;
 }
 
 static uint16_t
 lksd_proto_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 	uint16_t num)
 {
-	RTE_SET_USED(ss);
-	RTE_SET_USED(mb);
-	RTE_SET_USED(num);
-	rte_errno = ENOTSUP;
-	return 0;
+	uint32_t n;
+	struct rte_ipsec_sa *sa;
+	struct rte_mbuf *dr[num];
+
+	sa = ss->sa;
+
+	/* check that HW successfully processed packets */
+	n = pkt_flag_process(sa, mb, dr, num);
+
+	/* copy the bad ones, after the good ones */
+	if (n != num && n != 0)
+		mbuf_bulk_copy(mb + n, dr, num - n);
+	return n;
 }
 
 const struct rte_ipsec_sa_func *
-- 
2.13.6
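
For context, a minimal caller-side sketch of the lookaside-none
(RTE_SECURITY_ACTION_TYPE_NONE) flow this patch implements. The
rte_ipsec_crypto_prepare()/rte_ipsec_process() names come from the commit
message; their exact prototypes are assumed here to mirror the internal
lksd_none_prepare()/lksd_none_process() handlers, and the header name, the
crypto-op mempool, device/queue ids and the helper function itself are
illustrative only, not part of this patch.

#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>
#include <rte_ipsec.h>

/* hypothetical helper: run one burst of mbufs through a lookaside-none SA */
static uint16_t
lksd_none_burst(struct rte_ipsec_session *ss, struct rte_mempool *cop_pool,
	uint8_t cdev_id, uint16_t qp_id, struct rte_mbuf *mb[], uint16_t num)
{
	struct rte_crypto_op *cop[num];
	uint16_t k, n;
	uint32_t i;

	/* allocate symmetric crypto ops for the whole burst */
	if (rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			cop, num) != num)
		return 0;

	/* fill ESP header/trailer, SQN, IV, AAD, ICV and attach the session */
	k = rte_ipsec_crypto_prepare(ss, mb, cop, num);

	/* hand the prepared ops to the crypto PMD */
	n = rte_cryptodev_enqueue_burst(cdev_id, qp_id, cop, k);

	/* ... later, once the PMD has completed the ops ... */
	n = rte_cryptodev_dequeue_burst(cdev_id, qp_id, cop, n);

	/*
	 * finalize the packets: for inbound strip the ESP envelope and update
	 * the replay window, for outbound nothing crypto-related is left.
	 * For brevity this sketch assumes in-order completion, so mb[] still
	 * matches the dequeued ops; partial enqueue/dequeue handling omitted.
	 */
	n = rte_ipsec_process(ss, mb, n);

	for (i = 0; i != num; i++)
		rte_crypto_op_free(cop[i]);

	return n;
}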