From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: 
Received: from dpdk.org (dpdk.org [92.243.14.124])
	by inbox.dpdk.org (Postfix) with ESMTP id 21B67A0613
	for ; Fri, 27 Sep 2019 17:50:11 +0200 (CEST)
Received: from [92.243.14.124] (localhost [127.0.0.1])
	by dpdk.org (Postfix) with ESMTP id C8F2A1BF3E;
	Fri, 27 Sep 2019 17:49:58 +0200 (CEST)
Received: from mga07.intel.com (mga07.intel.com [134.134.136.100])
	by dpdk.org (Postfix) with ESMTP id 3AB291BF14
	for ; Fri, 27 Sep 2019 17:49:52 +0200 (CEST)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga001.jf.intel.com ([10.7.209.18])
	by orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
	27 Sep 2019 08:49:51 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.64,556,1559545200"; d="scan'208";a="273854554"
Received: from adamdybx-mobl.ger.corp.intel.com (HELO localhost.localdomain)
	([10.104.14.185])
	by orsmga001.jf.intel.com with ESMTP; 27 Sep 2019 08:49:49 -0700
From: Adam Dybkowski
To: dev@dpdk.org, fiona.trahe@intel.com, arkadiuszx.kusztal@intel.com,
	akhil.goyal@nxp.com
Cc: Adam Dybkowski
Date: Fri, 27 Sep 2019 17:47:39 +0200
Message-Id: <20190927154739.26404-4-adamx.dybkowski@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190927154739.26404-1-adamx.dybkowski@intel.com>
References: <20190906144751.3420-1-adamx.dybkowski@intel.com>
	<20190927154739.26404-1-adamx.dybkowski@intel.com>
Subject: [dpdk-dev] [PATCH v2 3/3] crypto/qat: handle Single Pass Crypto
	Requests on GEN3 QAT
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
Errors-To: dev-bounces@dpdk.org
Sender: "dev"

This patch improves the performance of AES GCM by using the Single Pass
Crypto Request functionality when running on GEN3 QAT. On older hardware
it falls back to the classic chained mode.

Signed-off-by: Adam Dybkowski
---
 doc/guides/rel_notes/release_19_11.rst |  7 +++
 drivers/crypto/qat/qat_sym.c           | 13 +++-
 drivers/crypto/qat/qat_sym_session.c   | 86 ++++++++++++++++++++++++--
 drivers/crypto/qat/qat_sym_session.h   |  9 ++-
 4 files changed, 107 insertions(+), 8 deletions(-)

diff --git a/doc/guides/rel_notes/release_19_11.rst b/doc/guides/rel_notes/release_19_11.rst
index 573683da4..4817b7f23 100644
--- a/doc/guides/rel_notes/release_19_11.rst
+++ b/doc/guides/rel_notes/release_19_11.rst
@@ -61,6 +61,13 @@ New Features
   Added stateful decompression support in the Intel QuickAssist Technology PMD.
   Please note that stateful compression is not supported.
 
+* **Enabled Single Pass GCM acceleration on QAT GEN3.**
+
+  Added support for Single Pass GCM, available on QAT GEN3 only (Intel
+  QuickAssist Technology C4xxx). It is automatically chosen instead of the
+  classic chained mode when running on QAT GEN3, significantly improving
+  the performance of AES GCM operations.
+
 
 Removed Items
 -------------
 
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 46ef27a6d..5ff4aa1e5 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 
 #include 
@@ -12,6 +12,7 @@
 
 #include "qat_sym.h"
 
+
 /** Decrypt a single partial block
  * Depends on openssl libcrypto
  * Uses ECB+XOR to do CFB encryption, same result, more performant
@@ -195,7 +196,8 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
 	cipher_param = (void *)&qat_req->serv_specif_rqpars;
-	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+	auth_param = (void *)((uint8_t *)cipher_param +
+			ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
 
 	if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
 			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
@@ -593,6 +595,13 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		qat_req->comn_mid.dest_data_addr = dst_buf_start;
 	}
 
+	/* Handle Single-Pass GCM */
+	if (ctx->is_single_pass) {
+		cipher_param->spc_aad_addr = op->sym->aead.aad.phys_addr;
+		cipher_param->spc_auth_res_addr =
+				op->sym->aead.digest.phys_addr;
+	}
+
 #if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
 	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
 			sizeof(struct icp_qat_fw_la_bulk_req));
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index e5167b3fa..7d0f4a69d 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -450,7 +450,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		break;
 	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-			ret = qat_sym_session_configure_aead(xform,
+			ret = qat_sym_session_configure_aead(dev, xform,
 					session);
 			if (ret < 0)
 				return ret;
@@ -467,7 +467,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
 		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-			ret = qat_sym_session_configure_aead(xform,
+			ret = qat_sym_session_configure_aead(dev, xform,
 					session);
 			if (ret < 0)
 				return ret;
@@ -503,6 +503,72 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	return 0;
 }
 
+static int
+qat_sym_session_handle_single_pass(struct qat_sym_dev_private *internals,
+		struct qat_sym_session *session,
+		struct rte_crypto_aead_xform *aead_xform)
+{
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+
+	if (qat_dev_gen == QAT_GEN3 &&
+			aead_xform->iv.length == QAT_AES_GCM_SPC_IV_SIZE) {
+		/* Use faster Single-Pass GCM */
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+				(void *) &session->fw_req.serv_specif_rqpars;
+
+		session->is_single_pass = 1;
+		session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
+		session->cipher_iv.offset = aead_xform->iv.offset;
+		session->cipher_iv.length = aead_xform->iv.length;
+		if (qat_sym_session_aead_create_cd_cipher(session,
+				aead_xform->key.data, aead_xform->key.length))
+			return -EINVAL;
+		session->aad_len = aead_xform->aad_length;
+		session->digest_length = aead_xform->digest_length;
+		if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+			session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+			ICP_QAT_FW_LA_RET_AUTH_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_RET_AUTH_RES);
+		} else {
+			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+			session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+			ICP_QAT_FW_LA_CMP_AUTH_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_CMP_AUTH_RES);
+		}
+		ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+		ICP_QAT_FW_LA_PROTO_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+				session->fw_req.comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+		session->fw_req.comn_hdr.service_cmd_id =
+				ICP_QAT_FW_LA_CMD_CIPHER;
+		session->cd.cipher.cipher_config.val =
+				ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+					ICP_QAT_HW_CIPHER_AEAD_MODE,
+					session->qat_cipher_alg,
+					ICP_QAT_HW_CIPHER_NO_CONVERT,
+					session->qat_dir);
+		QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
+				aead_xform->digest_length,
+				QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+				QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+		session->cd.cipher.cipher_config.reserved =
+				ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+					aead_xform->aad_length);
+		cipher_param->spc_aad_sz = aead_xform->aad_length;
+		cipher_param->spc_auth_res_sz = aead_xform->digest_length;
+	}
+	return 0;
+}
+
 int
 qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 				struct rte_crypto_sym_xform *xform,
@@ -646,7 +712,8 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
 }
 
 int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+				struct rte_crypto_sym_xform *xform,
 				struct qat_sym_session *session)
 {
 	struct rte_crypto_aead_xform *aead_xform = &xform->aead;
@@ -684,6 +751,17 @@ qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
 		return -EINVAL;
 	}
 
+	session->is_single_pass = 0;
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		/* Use faster Single-Pass GCM if possible */
+		int res = qat_sym_session_handle_single_pass(
+				dev->data->dev_private, session, aead_xform);
+		if (res < 0)
+			return res;
+		if (session->is_single_pass)
+			return 0;
+	}
+
 	if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
 			aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
 			(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -1444,7 +1522,7 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
 	struct icp_qat_fw_la_auth_req_params *auth_param =
 		(struct icp_qat_fw_la_auth_req_params *)
 		((char *)&req_tmpl->serv_specif_rqpars +
-		sizeof(struct icp_qat_fw_la_cipher_req_params));
+		ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
 	uint16_t state1_size = 0, state2_size = 0;
 	uint16_t hash_offset, cd_size;
 	uint32_t *aad_len = NULL;
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index ce1ca5af8..98985d686 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2018 Intel Corporation
+ * Copyright(c) 2015-2019 Intel Corporation
  */
 #ifndef _QAT_SYM_SESSION_H_
 #define _QAT_SYM_SESSION_H_
@@ -25,6 +25,9 @@
 #define QAT_3DES_KEY_SZ_OPT2	16 /* K3=K1 */
 #define QAT_3DES_KEY_SZ_OPT3	8 /* K1=K2=K3 */
 
+/* 96-bit case of IV for CCP/GCM single pass algorithm */
+#define QAT_AES_GCM_SPC_IV_SIZE 12
+
 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
@@ -78,6 +81,7 @@ struct qat_sym_session {
 	rte_spinlock_t lock;	/* protects this struct */
 	enum qat_device_gen min_qat_dev_gen;
 	uint8_t aes_cmac;
+	uint8_t is_single_pass;
 };
 
 int
@@ -91,7 +95,8 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private);
 
 int
-qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+qat_sym_session_configure_aead(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
 		struct qat_sym_session *session);
 
 int
-- 
2.17.1
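
Editor's note: for readers who want to exercise the new path from an
application, the sketch below is illustrative only and is not part of the
patch. It shows how an AES-GCM AEAD session could be set up through the
rte_cryptodev API of this release so that the 12-byte IV satisfies the
QAT_AES_GCM_SPC_IV_SIZE check added above (on non-GEN3 devices the PMD then
falls back to the chained path). The dev_id, key and mempool arguments, the
IV_OFFSET convention and the create_gcm_session() helper name are assumptions
made for the example.

#include <rte_crypto_sym.h>
#include <rte_cryptodev.h>

/* Conventional IV placement: right after the crypto op in the op mempool. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static struct rte_cryptodev_sym_session *
create_gcm_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		struct rte_mempool *sess_priv_mp,
		uint8_t *key, uint16_t key_len)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = key_len },
			/* 12-byte IV matches QAT_AES_GCM_SPC_IV_SIZE,
			 * so GEN3 devices pick the Single-Pass path.
			 */
			.iv = { .offset = IV_OFFSET, .length = 12 },
			.digest_length = 16,
			.aad_length = 16,
		},
	};
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/* The per-device init ends up in qat_sym_session_set_parameters()
	 * and qat_sym_session_configure_aead() shown in this patch.
	 */
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
			sess_priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}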