From mboxrd@z Thu Jan  1 00:00:00 1970
From: Marcin Smoczynski
To: akhil.goyal@nxp.com, konstantin.ananyev@intel.com,
	roy.fan.zhang@intel.com, declan.doherty@intel.com,
	radu.nicolau@intel.com, pablo.de.lara.guarch@intel.com
Cc: dev@dpdk.org, Marcin Smoczynski
Date: Tue, 4 Feb 2020 14:12:56 +0100
Message-Id: <20200204131258.17632-7-marcinx.smoczynski@intel.com>
X-Mailer: git-send-email 2.21.0.windows.1
In-Reply-To: <20200204131258.17632-1-marcinx.smoczynski@intel.com>
References: <20200128142220.16644-1-marcinx.smoczynski@intel.com>
	<20200204131258.17632-1-marcinx.smoczynski@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Subject: [dpdk-dev] [PATCH v6 6/8] examples/ipsec-secgw: cpu crypto support
List-Id: DPDK patches and discussions
Sender: "dev" <dev-bounces@dpdk.org>

Add support for CPU accelerated crypto. A new 'cpu-crypto' SA type has
been introduced in the configuration, allowing use of this acceleration.
Legacy mode does not currently support it.
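
An SA opts in to the acceleration with the 'type cpu-crypto' keyword in
the SA configuration file. For illustration only (the algorithm, key and
addresses below are placeholders, not taken from this patch), an outbound
SA entry could look like:

    sa out 5 aead_algo aes-128-gcm \
    aead_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \
    mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5 type cpu-crypto

Unlike the inline offload types, no 'port_id' option is required for such
an SA: the crypto runs on the lcore, not on a NIC.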
Signed-off-by: Konstantin Ananyev
Signed-off-by: Marcin Smoczynski
Acked-by: Fan Zhang
---
 examples/ipsec-secgw/ipsec.c         |  25 ++++-
 examples/ipsec-secgw/ipsec_process.c | 136 +++++++++++++++++----------
 examples/ipsec-secgw/sa.c            |  30 ++++--
 3 files changed, 131 insertions(+), 60 deletions(-)

diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index d4b57121a..6e8120702 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 #include
 #include
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -86,7 +87,8 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa,
 			ipsec_ctx->tbl[cdev_id_qp].id,
 			ipsec_ctx->tbl[cdev_id_qp].qp);

-	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE) {
+	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
+		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
 		struct rte_security_session_conf sess_conf = {
 			.action_type = ips->type,
 			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
@@ -126,6 +128,18 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa,
 			return -1;
 		}
 	} else {
+		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
+			struct rte_cryptodev_info info;
+			uint16_t cdev_id;
+
+			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
+			rte_cryptodev_info_get(cdev_id, &info);
+			if (!(info.feature_flags &
+				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
+				return -ENOTSUP;
+
+			ips->crypto.dev_id = cdev_id;
+		}
 		ips->crypto.ses = rte_cryptodev_sym_session_create(
 				ipsec_ctx->session_pool);
 		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
@@ -476,6 +490,13 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 			rte_security_attach_session(&priv->cop,
 				ips->security.ses);
 			break;
+
+		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
+					" legacy mode.");
+			rte_pktmbuf_free(pkts[i]);
+			continue;
+
 		case RTE_SECURITY_ACTION_TYPE_NONE:

 			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
diff --git a/examples/ipsec-secgw/ipsec_process.c b/examples/ipsec-secgw/ipsec_process.c
index 2eb5c8b34..bb2f2b82d 100644
--- a/examples/ipsec-secgw/ipsec_process.c
+++ b/examples/ipsec-secgw/ipsec_process.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */
 #include
 #include
@@ -92,7 +92,8 @@ fill_ipsec_session(struct rte_ipsec_session *ss, struct ipsec_ctx *ctx,
 	int32_t rc;

 	/* setup crypto section */
-	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+	if (ss->type == RTE_SECURITY_ACTION_TYPE_NONE ||
+			ss->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
 		RTE_ASSERT(ss->crypto.ses == NULL);
 		rc = create_lookaside_session(ctx, sa, ss);
 		if (rc != 0)
@@ -215,6 +216,62 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
 	return k;
 }

+/*
+ * helper routine for inline and cpu(synchronous) processing
+ * this is just to satisfy inbound_sa_check() and get_hop_for_offload_pkt().
+ * Should be removed in future.
+ */
+static inline void
+prep_process_group(void *sa, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint32_t j;
+	struct ipsec_mbuf_metadata *priv;
+
+	for (j = 0; j != cnt; j++) {
+		priv = get_priv(mb[j]);
+		priv->sa = sa;
+	}
+}
+
+/*
+ * finish processing of packets successfully decrypted by an inline processor
+ */
+static uint32_t
+ipsec_process_inline_group(struct rte_ipsec_session *ips, void *sa,
+	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint64_t satp;
+	uint32_t k;
+
+	/* get SA type */
+	satp = rte_ipsec_sa_type(ips->sa);
+	prep_process_group(sa, mb, cnt);
+
+	k = rte_ipsec_pkt_process(ips, mb, cnt);
+	copy_to_trf(trf, satp, mb, k);
+	return k;
+}
+
+/*
+ * process packets synchronously
+ */
+static uint32_t
+ipsec_process_cpu_group(struct rte_ipsec_session *ips, void *sa,
+	struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t cnt)
+{
+	uint64_t satp;
+	uint32_t k;
+
+	/* get SA type */
+	satp = rte_ipsec_sa_type(ips->sa);
+	prep_process_group(sa, mb, cnt);
+
+	k = rte_ipsec_pkt_cpu_prepare(ips, mb, cnt);
+	k = rte_ipsec_pkt_process(ips, mb, k);
+	copy_to_trf(trf, satp, mb, k);
+	return k;
+}
+
 /*
  * Process ipsec packets.
  * If packet belong to SA that is subject of inline-crypto,
@@ -225,10 +282,8 @@ ipsec_prepare_crypto_group(struct ipsec_ctx *ctx, struct ipsec_sa *sa,
 void
 ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 {
-	uint64_t satp;
-	uint32_t i, j, k, n;
+	uint32_t i, k, n;
 	struct ipsec_sa *sa;
-	struct ipsec_mbuf_metadata *priv;
 	struct rte_ipsec_group *pg;
 	struct rte_ipsec_session *ips;
 	struct rte_ipsec_group grp[RTE_DIM(trf->ipsec.pkts)];
@@ -236,10 +291,17 @@ ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 	n = sa_group(trf->ipsec.saptr, trf->ipsec.pkts, grp, trf->ipsec.num);

 	for (i = 0; i != n; i++) {
+
 		pg = grp + i;
 		sa = ipsec_mask_saptr(pg->id.ptr);

-		ips = ipsec_get_primary_session(sa);
+		/* fallback to cryptodev with RX packets which inline
+		 * processor was unable to process
+		 */
+		if (sa != NULL)
+			ips = (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) ?
+				ipsec_get_fallback_session(sa) :
+				ipsec_get_primary_session(sa);

 		/* no valid HW session for that SA, try to create one */
 		if (sa == NULL || (ips->crypto.ses == NULL &&
@@ -247,50 +309,26 @@ ipsec_process(struct ipsec_ctx *ctx, struct ipsec_traffic *trf)
 			k = 0;

 		/* process packets inline */
-		else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
-				ips->type ==
-				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
-
-			/* get SA type */
-			satp = rte_ipsec_sa_type(ips->sa);
-
-			/*
-			 * This is just to satisfy inbound_sa_check()
-			 * and get_hop_for_offload_pkt().
-			 * Should be removed in future.
-			 */
-			for (j = 0; j != pg->cnt; j++) {
-				priv = get_priv(pg->m[j]);
-				priv->sa = sa;
+		else {
+			switch (ips->type) {
+			/* enqueue packets to crypto dev */
+			case RTE_SECURITY_ACTION_TYPE_NONE:
+			case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+				k = ipsec_prepare_crypto_group(ctx, sa, ips,
+					pg->m, pg->cnt);
+				break;
+			case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+			case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+				k = ipsec_process_inline_group(ips, sa,
+					trf, pg->m, pg->cnt);
+				break;
+			case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+				k = ipsec_process_cpu_group(ips, sa,
+					trf, pg->m, pg->cnt);
+				break;
+			default:
+				k = 0;
 			}
-
-			/* fallback to cryptodev with RX packets which inline
-			 * processor was unable to process
-			 */
-			if (pg->id.val & IPSEC_SA_OFFLOAD_FALLBACK_FLAG) {
-				/* offload packets to cryptodev */
-				struct rte_ipsec_session *fallback;
-
-				fallback = ipsec_get_fallback_session(sa);
-				if (fallback->crypto.ses == NULL &&
-					fill_ipsec_session(fallback, ctx, sa)
-					!= 0)
-					k = 0;
-				else
-					k = ipsec_prepare_crypto_group(ctx, sa,
-						fallback, pg->m, pg->cnt);
-			} else {
-				/* finish processing of packets successfully
-				 * decrypted by an inline processor
-				 */
-				k = rte_ipsec_pkt_process(ips, pg->m, pg->cnt);
-				copy_to_trf(trf, satp, pg->m, k);
-
-			}
-		/* enqueue packets to crypto dev */
-		} else {
-			k = ipsec_prepare_crypto_group(ctx, sa, ips, pg->m,
-				pg->cnt);
 		}

 		/* drop packets that cannot be enqueued/processed */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index c75a5a15f..e9e8d624c 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2020 Intel Corporation
  */

 /*
@@ -586,6 +586,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 					RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
 			else if (strcmp(tokens[ti], "no-offload") == 0)
 				ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
+			else if (strcmp(tokens[ti], "cpu-crypto") == 0)
+				ips->type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO;
 			else {
 				APP_CHECK(0, status, "Invalid input \"%s\"",
 						tokens[ti]);
@@ -679,10 +681,12 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 	if (status->status < 0)
 		return;

-	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
+	if ((ips->type != RTE_SECURITY_ACTION_TYPE_NONE && ips->type !=
+			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) && (portid_p == 0))
 		printf("Missing portid option, falling back to non-offload\n");

-	if (!type_p || !portid_p) {
+	if (!type_p || (!portid_p && ips->type !=
+			RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO)) {
 		ips->type = RTE_SECURITY_ACTION_TYPE_NONE;
 		rule->portid = -1;
 	}
@@ -768,15 +772,25 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
 	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
 		printf("lookaside-protocol-offload ");
 		break;
+	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+		printf("cpu-crypto-accelerated");
+		break;
 	}

 	fallback_ips = &sa->sessions[IPSEC_SESSION_FALLBACK];
 	if (fallback_ips != NULL && sa->fallback_sessions > 0) {
 		printf("inline fallback: ");
-		if (fallback_ips->type == RTE_SECURITY_ACTION_TYPE_NONE)
+		switch (fallback_ips->type) {
+		case RTE_SECURITY_ACTION_TYPE_NONE:
 			printf("lookaside-none");
-		else
+			break;
+		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+			printf("cpu-crypto-accelerated");
+			break;
+		default:
 			printf("invalid");
+			break;
+		}
 	}
 	printf("\n");
 }
@@ -975,7 +989,6 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 			return -EINVAL;
 		}

-
 		switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
 		case IP4_TUNNEL:
 			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
 			sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
@@ -1026,7 +1039,6 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 				return -EINVAL;
 			}
 		}
-		print_one_sa_rule(sa, inbound);
 	} else {
 		switch (sa->cipher_algo) {
 		case RTE_CRYPTO_CIPHER_NULL:
@@ -1091,9 +1103,9 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 			sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
 			sa_ctx->xf[idx].b.next = NULL;
 			sa->xforms = &sa_ctx->xf[idx].a;
-
-			print_one_sa_rule(sa, inbound);
 		}
+
+		print_one_sa_rule(sa, inbound);
 	}

 	return 0;
-- 
2.17.1
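
A note on the cryptodev feature check added to create_lookaside_session()
above: an application can probe a device for synchronous crypto support
before creating a 'cpu-crypto' session. A minimal standalone sketch (the
helper name is mine, not part of the patch):

    #include <errno.h>
    #include <rte_cryptodev.h>

    /* Return 0 when the crypto device can process symmetric crypto
     * synchronously on the CPU, -ENOTSUP otherwise.
     */
    static int
    cdev_supports_cpu_crypto(uint8_t dev_id)
    {
            struct rte_cryptodev_info info;

            rte_cryptodev_info_get(dev_id, &info);
            if ((info.feature_flags & RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO) == 0)
                    return -ENOTSUP;
            return 0;
    }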
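
Likewise, the data path of ipsec_process_cpu_group() reduces to a
prepare + process pair from librte_ipsec. An illustrative wrapper for a
single-SA burst (error handling trimmed; not part of the patch):

    #include <rte_ipsec.h>
    #include <rte_mbuf.h>

    /* Process a burst of packets belonging to one SA entirely on the
     * calling lcore: rte_ipsec_pkt_cpu_prepare() performs the crypto
     * transformations, rte_ipsec_pkt_process() finishes IPsec processing
     * for the packets that were crypted successfully. Returns the number
     * of packets fully processed; failed mbufs are moved to the tail of
     * 'mb' by the library.
     */
    static uint16_t
    cpu_crypto_burst(struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
            uint16_t num)
    {
            uint16_t k;

            k = rte_ipsec_pkt_cpu_prepare(ss, mb, num);
            return rte_ipsec_pkt_process(ss, mb, k);
    }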