From: Arek Kusztal
To: dev@dpdk.org
Cc: gakhil@marvell.com, roy.fan.zhang@intel.com, Arek Kusztal
Subject: [PATCH v2 1/5] crypto/qat: refactor asymmetric crypto functions
Date: Wed, 9 Feb 2022 14:00:16 +0000
Message-Id: <20220209140020.19365-2-arkadiuszx.kusztal@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20220209140020.19365-1-arkadiuszx.kusztal@intel.com>
References: <20220209140020.19365-1-arkadiuszx.kusztal@intel.com>

This commit refactors the asymmetric crypto functions in the Intel
QuickAssist Technology PMD. The functions are now shorter and far
easier to read, and the new layout makes it simpler to add new
algorithms.
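
For illustration only (not part of the patch): a minimal, self-contained
sketch of the pattern the new qat_pke.h helpers follow, where each
algorithm maps an operand length to a firmware function ID plus the padded
buffer size in one small helper, instead of scanning the shared size/ID
tables removed from qat_pke_functionality_arrays.h. The DEMO_MODEXP_*
constants below are stand-ins for the real MMP IDs that come from
icp_qat_fw_mmp_ids.h.

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins for the firmware MMP function IDs; the real values
	 * are defined in icp_qat_fw_mmp_ids.h in the QAT common code. */
	#define DEMO_MODEXP_L512   0x41
	#define DEMO_MODEXP_L1024  0x42
	#define DEMO_MODEXP_L2048  0x43

	struct qat_asym_function {
		uint32_t func_id;
		uint32_t bytesize;
	};

	/* One helper per algorithm: map the modulus length (in bytes) to
	 * the matching firmware function and the padded operand size it
	 * expects. A zero func_id means the size is not supported. */
	static struct qat_asym_function
	demo_get_modexp_function(uint32_t modulus_len)
	{
		struct qat_asym_function f = { 0 };

		if (modulus_len <= 64) {
			f.func_id = DEMO_MODEXP_L512;
			f.bytesize = 64;
		} else if (modulus_len <= 128) {
			f.func_id = DEMO_MODEXP_L1024;
			f.bytesize = 128;
		} else if (modulus_len <= 256) {
			f.func_id = DEMO_MODEXP_L2048;
			f.bytesize = 256;
		}
		return f;
	}

	int main(void)
	{
		struct qat_asym_function f = demo_get_modexp_function(100);

		if (f.func_id == 0)
			printf("unsupported modulus size\n");
		else
			printf("func_id 0x%x, padded size %u bytes\n",
			       (unsigned int)f.func_id,
			       (unsigned int)f.bytesize);
		return 0;
	}

With this shape, supporting a new algorithm is mostly a matter of adding
one such lookup helper together with a *_set_input/*_collect pair wired
into the asym_set_input() and qat_asym_collect_response() switches below,
which is what the commit message means by facilitating new algorithms.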
Signed-off-by: Arek Kusztal --- doc/guides/cryptodevs/qat.rst | 1 + drivers/common/qat/qat_adf/qat_pke.h | 215 ++++ .../qat/qat_adf/qat_pke_functionality_arrays.h | 79 -- drivers/crypto/qat/dev/qat_asym_pmd_gen1.c | 1 - drivers/crypto/qat/qat_asym.c | 1123 +++++++++----------- drivers/crypto/qat/qat_asym.h | 2 +- 6 files changed, 710 insertions(+), 711 deletions(-) create mode 100644 drivers/common/qat/qat_adf/qat_pke.h delete mode 100644 drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst index 666a01df33..4428b89195 100644 --- a/doc/guides/cryptodevs/qat.rst +++ b/doc/guides/cryptodevs/qat.rst @@ -174,6 +174,7 @@ The QAT ASYM PMD has support for: * ``RTE_CRYPTO_ASYM_XFORM_MODEX`` * ``RTE_CRYPTO_ASYM_XFORM_MODINV`` +* ``RTE_CRYPTO_ASYM_XFORM_RSA`` Limitations ~~~~~~~~~~~ diff --git a/drivers/common/qat/qat_adf/qat_pke.h b/drivers/common/qat/qat_adf/qat_pke.h new file mode 100644 index 0000000000..82bb1ee55e --- /dev/null +++ b/drivers/common/qat/qat_adf/qat_pke.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2022 Intel Corporation + */ + +#ifndef _QAT_PKE_FUNCTIONALITY_ARRAYS_H_ +#define _QAT_PKE_FUNCTIONALITY_ARRAYS_H_ + +#include "icp_qat_fw_mmp_ids.h" + +/* + * Modular exponentiation functionality IDs + */ + +struct qat_asym_function { + uint32_t func_id; + uint32_t bytesize; +}; + +static struct qat_asym_function +get_modexp_function(struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function = { }; + + if (xform->modex.modulus.length <= 64) { + qat_function.func_id = MATHS_MODEXP_L512; + qat_function.bytesize = 64; + } else if (xform->modex.modulus.length <= 128) { + qat_function.func_id = MATHS_MODEXP_L1024; + qat_function.bytesize = 128; + } else if (xform->modex.modulus.length <= 192) { + qat_function.func_id = MATHS_MODEXP_L1536; + qat_function.bytesize = 192; + } else if (xform->modex.modulus.length <= 256) { + qat_function.func_id = MATHS_MODEXP_L2048; + qat_function.bytesize = 256; + } else if (xform->modex.modulus.length <= 320) { + qat_function.func_id = MATHS_MODEXP_L2560; + qat_function.bytesize = 320; + } else if (xform->modex.modulus.length <= 384) { + qat_function.func_id = MATHS_MODEXP_L3072; + qat_function.bytesize = 384; + } else if (xform->modex.modulus.length <= 448) { + qat_function.func_id = MATHS_MODEXP_L3584; + qat_function.bytesize = 448; + } else if (xform->modex.modulus.length <= 512) { + qat_function.func_id = MATHS_MODEXP_L4096; + qat_function.bytesize = 512; + } + return qat_function; +} + +static struct qat_asym_function +get_modinv_function(struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function = { }; + + if (xform->modinv.modulus.data[ + xform->modinv.modulus.length - 1] & 0x01) { + if (xform->modex.modulus.length <= 16) { + qat_function.func_id = MATHS_MODINV_ODD_L128; + qat_function.bytesize = 16; + } else if (xform->modex.modulus.length <= 24) { + qat_function.func_id = MATHS_MODINV_ODD_L192; + qat_function.bytesize = 24; + } else if (xform->modex.modulus.length <= 32) { + qat_function.func_id = MATHS_MODINV_ODD_L256; + qat_function.bytesize = 32; + } else if (xform->modex.modulus.length <= 48) { + qat_function.func_id = MATHS_MODINV_ODD_L384; + qat_function.bytesize = 48; + } else if (xform->modex.modulus.length <= 64) { + qat_function.func_id = MATHS_MODINV_ODD_L512; + qat_function.bytesize = 64; + } else if (xform->modex.modulus.length <= 96) { + qat_function.func_id = 
MATHS_MODINV_ODD_L768; + qat_function.bytesize = 96; + } else if (xform->modex.modulus.length <= 128) { + qat_function.func_id = MATHS_MODINV_ODD_L1024; + qat_function.bytesize = 128; + } else if (xform->modex.modulus.length <= 192) { + qat_function.func_id = MATHS_MODINV_ODD_L1536; + qat_function.bytesize = 192; + } else if (xform->modex.modulus.length <= 256) { + qat_function.func_id = MATHS_MODINV_ODD_L2048; + qat_function.bytesize = 256; + } else if (xform->modex.modulus.length <= 384) { + qat_function.func_id = MATHS_MODINV_ODD_L3072; + qat_function.bytesize = 384; + } else if (xform->modex.modulus.length <= 512) { + qat_function.func_id = MATHS_MODINV_ODD_L4096; + qat_function.bytesize = 512; + } + } else { + if (xform->modex.modulus.length <= 16) { + qat_function.func_id = MATHS_MODINV_EVEN_L128; + qat_function.bytesize = 16; + } else if (xform->modex.modulus.length <= 24) { + qat_function.func_id = MATHS_MODINV_EVEN_L192; + qat_function.bytesize = 24; + } else if (xform->modex.modulus.length <= 32) { + qat_function.func_id = MATHS_MODINV_EVEN_L256; + qat_function.bytesize = 32; + } else if (xform->modex.modulus.length <= 48) { + qat_function.func_id = MATHS_MODINV_EVEN_L384; + qat_function.bytesize = 48; + } else if (xform->modex.modulus.length <= 64) { + qat_function.func_id = MATHS_MODINV_EVEN_L512; + qat_function.bytesize = 64; + } else if (xform->modex.modulus.length <= 96) { + qat_function.func_id = MATHS_MODINV_EVEN_L768; + qat_function.bytesize = 96; + } else if (xform->modex.modulus.length <= 128) { + qat_function.func_id = MATHS_MODINV_EVEN_L1024; + qat_function.bytesize = 128; + } else if (xform->modex.modulus.length <= 192) { + qat_function.func_id = MATHS_MODINV_EVEN_L1536; + qat_function.bytesize = 192; + } else if (xform->modex.modulus.length <= 256) { + qat_function.func_id = MATHS_MODINV_EVEN_L2048; + qat_function.bytesize = 256; + } else if (xform->modex.modulus.length <= 384) { + qat_function.func_id = MATHS_MODINV_EVEN_L3072; + qat_function.bytesize = 384; + } else if (xform->modex.modulus.length <= 512) { + qat_function.func_id = MATHS_MODINV_EVEN_L4096; + qat_function.bytesize = 512; + } + } + + return qat_function; +} + +static struct qat_asym_function +get_rsa_enc_function(struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function = { }; + + if (xform->rsa.n.length <= 64) { + qat_function.func_id = PKE_RSA_EP_512; + qat_function.bytesize = 64; + } else if (xform->rsa.n.length <= 128) { + qat_function.func_id = PKE_RSA_EP_1024; + qat_function.bytesize = 128; + } else if (xform->rsa.n.length <= 192) { + qat_function.func_id = PKE_RSA_EP_1536; + qat_function.bytesize = 192; + } else if (xform->rsa.n.length <= 256) { + qat_function.func_id = PKE_RSA_EP_2048; + qat_function.bytesize = 256; + } else if (xform->rsa.n.length <= 384) { + qat_function.func_id = PKE_RSA_EP_3072; + qat_function.bytesize = 384; + } else if (xform->rsa.n.length <= 512) { + qat_function.func_id = PKE_RSA_EP_4096; + qat_function.bytesize = 512; + } + return qat_function; +} + +static struct qat_asym_function +get_rsa_dec_function(struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function = { }; + + if (xform->rsa.n.length <= 64) { + qat_function.func_id = PKE_RSA_DP1_512; + qat_function.bytesize = 64; + } else if (xform->rsa.n.length <= 128) { + qat_function.func_id = PKE_RSA_DP1_1024; + qat_function.bytesize = 128; + } else if (xform->rsa.n.length <= 192) { + qat_function.func_id = PKE_RSA_DP1_1536; + qat_function.bytesize = 192; + } else if 
(xform->rsa.n.length <= 256) { + qat_function.func_id = PKE_RSA_DP1_2048; + qat_function.bytesize = 256; + } else if (xform->rsa.n.length <= 384) { + qat_function.func_id = PKE_RSA_DP1_3072; + qat_function.bytesize = 384; + } else if (xform->rsa.n.length <= 512) { + qat_function.func_id = PKE_RSA_DP1_4096; + qat_function.bytesize = 512; + } + return qat_function; +} + +static struct qat_asym_function +get_rsa_crt_function(struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function = { }; + int nlen = xform->rsa.qt.p.length * 2; + + if (nlen <= 64) { + qat_function.func_id = PKE_RSA_DP2_512; + qat_function.bytesize = 64; + } else if (nlen <= 128) { + qat_function.func_id = PKE_RSA_DP2_1024; + qat_function.bytesize = 128; + } else if (nlen <= 192) { + qat_function.func_id = PKE_RSA_DP2_1536; + qat_function.bytesize = 192; + } else if (nlen <= 256) { + qat_function.func_id = PKE_RSA_DP2_2048; + qat_function.bytesize = 256; + } else if (nlen <= 384) { + qat_function.func_id = PKE_RSA_DP2_3072; + qat_function.bytesize = 384; + } else if (nlen <= 512) { + qat_function.func_id = PKE_RSA_DP2_4096; + qat_function.bytesize = 512; + } + return qat_function; +} + +#endif diff --git a/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h b/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h deleted file mode 100644 index 42ffbbadd0..0000000000 --- a/drivers/common/qat/qat_adf/qat_pke_functionality_arrays.h +++ /dev/null @@ -1,79 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation - */ - -#ifndef _QAT_PKE_FUNCTIONALITY_ARRAYS_H_ -#define _QAT_PKE_FUNCTIONALITY_ARRAYS_H_ - -#include "icp_qat_fw_mmp_ids.h" - -/* - * Modular exponentiation functionality IDs - */ -static const uint32_t MOD_EXP_SIZE[][2] = { - { 512, MATHS_MODEXP_L512 }, - { 1024, MATHS_MODEXP_L1024 }, - { 1536, MATHS_MODEXP_L1536 }, - { 2048, MATHS_MODEXP_L2048 }, - { 2560, MATHS_MODEXP_L2560 }, - { 3072, MATHS_MODEXP_L3072 }, - { 3584, MATHS_MODEXP_L3584 }, - { 4096, MATHS_MODEXP_L4096 } -}; - -static const uint32_t MOD_INV_IDS_ODD[][2] = { - { 128, MATHS_MODINV_ODD_L128 }, - { 192, MATHS_MODINV_ODD_L192 }, - { 256, MATHS_MODINV_ODD_L256 }, - { 384, MATHS_MODINV_ODD_L384 }, - { 512, MATHS_MODINV_ODD_L512 }, - { 768, MATHS_MODINV_ODD_L768 }, - { 1024, MATHS_MODINV_ODD_L1024 }, - { 1536, MATHS_MODINV_ODD_L1536 }, - { 2048, MATHS_MODINV_ODD_L2048 }, - { 3072, MATHS_MODINV_ODD_L3072 }, - { 4096, MATHS_MODINV_ODD_L4096 }, -}; - -static const uint32_t MOD_INV_IDS_EVEN[][2] = { - { 128, MATHS_MODINV_EVEN_L128 }, - { 192, MATHS_MODINV_EVEN_L192 }, - { 256, MATHS_MODINV_EVEN_L256 }, - { 384, MATHS_MODINV_EVEN_L384 }, - { 512, MATHS_MODINV_EVEN_L512 }, - { 768, MATHS_MODINV_EVEN_L768 }, - { 1024, MATHS_MODINV_EVEN_L1024 }, - { 1536, MATHS_MODINV_EVEN_L1536 }, - { 2048, MATHS_MODINV_EVEN_L2048 }, - { 3072, MATHS_MODINV_EVEN_L3072 }, - { 4096, MATHS_MODINV_EVEN_L4096 }, -}; - -static const uint32_t RSA_ENC_IDS[][2] = { - { 512, PKE_RSA_EP_512 }, - { 1024, PKE_RSA_EP_1024 }, - { 1536, PKE_RSA_EP_1536 }, - { 2048, PKE_RSA_EP_2048 }, - { 3072, PKE_RSA_EP_3072 }, - { 4096, PKE_RSA_EP_4096 }, -}; - -static const uint32_t RSA_DEC_IDS[][2] = { - { 512, PKE_RSA_DP1_512 }, - { 1024, PKE_RSA_DP1_1024 }, - { 1536, PKE_RSA_DP1_1536 }, - { 2048, PKE_RSA_DP1_2048 }, - { 3072, PKE_RSA_DP1_3072 }, - { 4096, PKE_RSA_DP1_4096 }, -}; - -static const uint32_t RSA_DEC_CRT_IDS[][2] = { - { 512, PKE_RSA_DP2_512 }, - { 1024, PKE_RSA_DP2_1024 }, - { 1536, PKE_RSA_DP2_1536 }, - { 2048, PKE_RSA_DP2_2048 }, - { 
3072, PKE_RSA_DP2_3072 }, - { 4096, PKE_RSA_DP2_4096 }, -}; - -#endif diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c index 9ed1f21d9d..c3cee42c28 100644 --- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c @@ -7,7 +7,6 @@ #include "qat_asym.h" #include "qat_crypto.h" #include "qat_crypto_pmd_gens.h" -#include "qat_pke_functionality_arrays.h" struct rte_cryptodev_ops qat_asym_crypto_ops_gen1 = { /* Device related operations */ diff --git a/drivers/crypto/qat/qat_asym.c b/drivers/crypto/qat/qat_asym.c index 09d8761c5f..9b4c579a9d 100644 --- a/drivers/crypto/qat/qat_asym.c +++ b/drivers/crypto/qat/qat_asym.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2019 Intel Corporation + * Copyright(c) 2019-2022 Intel Corporation */ #include @@ -7,64 +7,54 @@ #include "qat_asym.h" #include "icp_qat_fw_pke.h" #include "icp_qat_fw.h" -#include "qat_pke_functionality_arrays.h" +#include "qat_pke.h" -#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg)) - -static int qat_asym_get_sz_and_func_id(const uint32_t arr[][2], - size_t arr_sz, size_t *size, uint32_t *func_id) -{ - size_t i; - - for (i = 0; i < arr_sz; i++) { - if (*size <= arr[i][0]) { - *size = arr[i][0]; - *func_id = arr[i][1]; - return 0; - } - } - return -1; -} +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG +#define HEXDUMP(name, where, size) QAT_DP_HEXDUMP_LOG(DEBUG, name, \ + where, size) +#define HEXDUMP_OFF(name, where, size, idx) QAT_DP_HEXDUMP_LOG(DEBUG, name, \ + &where[idx * size], size) +#else +#define HEXDUMP(name, where, size) +#define HEXDUMP_OFF(name, where, size, idx) +#endif -static inline void qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req) +#define CHECK_IF_NOT_EMPTY(param, name, pname, status) \ + do { \ + if (param.length == 0) { \ + QAT_LOG(ERR, \ + "Invalid " name \ + " input parameter, zero length " pname \ + ); \ + status = -EINVAL; \ + } else if (check_zero(param)) { \ + QAT_LOG(ERR, \ + "Invalid " name " input parameter, empty " \ + pname ", length = %d", \ + (int)param.length \ + ); \ + status = -EINVAL; \ + } \ + } while (0) + +#define SET_PKE_LN(where, what, how, idx) \ + rte_memcpy(where[idx] + how - \ + what.length, \ + what.data, \ + what.length) + +static void +request_init(struct icp_qat_fw_pke_request *qat_req) { memset(qat_req, 0, sizeof(*qat_req)); qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE; - qat_req->pke_hdr.hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD (ICP_QAT_FW_COMN_REQ_FLAG_SET); } -static inline void qat_asym_build_req_tmpl(void *sess_private_data) -{ - struct icp_qat_fw_pke_request *qat_req; - struct qat_asym_session *session = sess_private_data; - - qat_req = &session->req_tmpl; - qat_fill_req_tmpl(qat_req); -} - -static size_t max_of(int n, ...) 
-{ - va_list args; - size_t len = 0, num; - int i; - - va_start(args, n); - len = va_arg(args, size_t); - - for (i = 0; i < n - 1; i++) { - num = va_arg(args, size_t); - if (num > len) - len = num; - } - va_end(args); - - return len; -} - -static void qat_clear_arrays(struct qat_asym_op_cookie *cookie, +static void +cleanup_arrays(struct qat_asym_op_cookie *cookie, int in_count, int out_count, int alg_size) { int i; @@ -75,7 +65,8 @@ static void qat_clear_arrays(struct qat_asym_op_cookie *cookie, memset(cookie->output_array[i], 0x0, alg_size); } -static void qat_clear_arrays_crt(struct qat_asym_op_cookie *cookie, +static void +cleanup_crt(struct qat_asym_op_cookie *cookie, int alg_size) { int i; @@ -87,432 +78,489 @@ static void qat_clear_arrays_crt(struct qat_asym_op_cookie *cookie, memset(cookie->output_array[i], 0x0, alg_size); } -static void qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie, +static void +cleanup(struct qat_asym_op_cookie *cookie, struct rte_crypto_asym_xform *xform, int alg_size) { if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) - qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS, + cleanup_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS, QAT_ASYM_MODEXP_NUM_OUT_PARAMS, alg_size); else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) - qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS, + cleanup_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS, QAT_ASYM_MODINV_NUM_OUT_PARAMS, alg_size); else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) - qat_clear_arrays_crt(cookie, alg_size); + cleanup_crt(cookie, alg_size); else { - qat_clear_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS, + cleanup_arrays(cookie, QAT_ASYM_RSA_NUM_IN_PARAMS, QAT_ASYM_RSA_NUM_OUT_PARAMS, alg_size); } } } -static int qat_asym_check_nonzero(rte_crypto_param n) +static int +check_zero(rte_crypto_param n) { - if (n.length < 8) { - /* Not a case for any cryptographic function except for DH - * generator which very often can be of one byte length - */ - size_t i; - - if (n.data[n.length - 1] == 0x0) { - for (i = 0; i < n.length - 1; i++) - if (n.data[i] != 0x0) - break; - if (i == n.length - 1) - return -(EINVAL); - } - } else if (*(uint64_t *)&n.data[ - n.length - 8] == 0) { - /* Very likely it is zeroed modulus */ - size_t i; + int i, len = n.length; - for (i = 0; i < n.length - 8; i++) + if (len < 8) { + for (i = len - 1; i >= 0; i--) { if (n.data[i] != 0x0) - break; - if (i == n.length - 8) - return -(EINVAL); + return 0; + } + } else if (len == 8 && *(uint64_t *)&n.data[len - 8] == 0) { + return 1; + } else if (*(uint64_t *)&n.data[len - 8] == 0) { + for (i = len - 9; i >= 0; i--) { + if (n.data[i] != 0x0) + return 0; + } + } else + return 0; + + return 1; +} + +static struct qat_asym_function +get_asym_function(struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function; + + switch (xform->xform_type) { + case RTE_CRYPTO_ASYM_XFORM_MODEX: + qat_function = get_modexp_function(xform); + break; + case RTE_CRYPTO_ASYM_XFORM_MODINV: + qat_function = get_modinv_function(xform); + break; + default: + qat_function.func_id = 0; + break; } - return 0; + return qat_function; } static int -qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op, +modexp_set_input(struct rte_crypto_asym_op *asym_op, struct icp_qat_fw_pke_request *qat_req, struct qat_asym_op_cookie *cookie, struct rte_crypto_asym_xform *xform) { - int err = 0; - size_t alg_size; - size_t alg_size_in_bytes; - uint32_t func_id = 0; + struct 
qat_asym_function qat_function; + uint32_t alg_bytesize, func_id; + int status = 0; + + CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod exp", + "modulus", status); + CHECK_IF_NOT_EMPTY(xform->modex.exponent, "mod exp", + "exponent", status); + if (status) + return status; + + qat_function = get_asym_function(xform); + func_id = qat_function.func_id; + if (qat_function.func_id == 0) { + QAT_LOG(ERR, "Cannot obtain functionality id"); + return -EINVAL; + } + alg_bytesize = qat_function.bytesize; - if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { - err = qat_asym_check_nonzero(xform->modex.modulus); - if (err) { - QAT_LOG(ERR, "Empty modulus in modular exponentiation," - " aborting this operation"); - return err; - } + SET_PKE_LN(cookie->input_array, asym_op->modex.base, + alg_bytesize, 0); + SET_PKE_LN(cookie->input_array, xform->modex.exponent, + alg_bytesize, 1); + SET_PKE_LN(cookie->input_array, xform->modex.modulus, + alg_bytesize, 2); - alg_size_in_bytes = max_of(3, asym_op->modex.base.length, - xform->modex.exponent.length, - xform->modex.modulus.length); - alg_size = alg_size_in_bytes << 3; + cookie->alg_bytesize = alg_bytesize; + qat_req->pke_hdr.cd_pars.func_id = func_id; + qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS; + qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS; - if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE, - sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE), - &alg_size, &func_id)) { - return -(EINVAL); + HEXDUMP("ModExp base", cookie->input_array[0], alg_bytesize); + HEXDUMP("ModExp exponent", cookie->input_array[1], alg_bytesize); + HEXDUMP("ModExp modulus", cookie->input_array[2], alg_bytesize); + + return status; +} + +static uint8_t +modexp_collect(struct rte_crypto_asym_op *asym_op, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + rte_crypto_param n = xform->modex.modulus; + uint32_t alg_bytesize = cookie->alg_bytesize; + uint8_t *modexp_result = asym_op->modex.result.data; + + rte_memcpy(modexp_result, + cookie->output_array[0] + alg_bytesize + - n.length, n.length); + HEXDUMP("ModExp result", cookie->output_array[0], + alg_bytesize); + return RTE_CRYPTO_OP_STATUS_SUCCESS; +} + +static int +modinv_set_input(struct rte_crypto_asym_op *asym_op, + struct icp_qat_fw_pke_request *qat_req, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function; + uint32_t alg_bytesize, func_id; + int status = 0; + + CHECK_IF_NOT_EMPTY(xform->modex.modulus, "mod inv", + "modulus", status); + if (status) + return status; + + qat_function = get_asym_function(xform); + func_id = qat_function.func_id; + if (func_id == 0) { + QAT_LOG(ERR, "Cannot obtain functionality id"); + return -EINVAL; + } + alg_bytesize = qat_function.bytesize; + + SET_PKE_LN(cookie->input_array, asym_op->modinv.base, + alg_bytesize, 0); + SET_PKE_LN(cookie->input_array, xform->modinv.modulus, + alg_bytesize, 1); + + cookie->alg_bytesize = alg_bytesize; + qat_req->pke_hdr.cd_pars.func_id = func_id; + qat_req->input_param_count = + QAT_ASYM_MODINV_NUM_IN_PARAMS; + qat_req->output_param_count = + QAT_ASYM_MODINV_NUM_OUT_PARAMS; + + HEXDUMP("ModInv base", cookie->input_array[0], alg_bytesize); + HEXDUMP("ModInv modulus", cookie->input_array[1], alg_bytesize); + + return 0; +} + +static uint8_t +modinv_collect(struct rte_crypto_asym_op *asym_op, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + rte_crypto_param n = xform->modinv.modulus; + uint8_t *modinv_result = 
asym_op->modinv.result.data; + uint32_t alg_bytesize = cookie->alg_bytesize; + + rte_memcpy(modinv_result + (asym_op->modinv.result.length + - n.length), + cookie->output_array[0] + alg_bytesize + - n.length, n.length); + HEXDUMP("ModInv result", cookie->output_array[0], + alg_bytesize); + return RTE_CRYPTO_OP_STATUS_SUCCESS; +} + +static int +rsa_set_pub_input(struct rte_crypto_asym_op *asym_op, + struct icp_qat_fw_pke_request *qat_req, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function; + uint32_t alg_bytesize, func_id; + int status = 0; + + qat_function = get_rsa_enc_function(xform); + func_id = qat_function.func_id; + if (func_id == 0) { + QAT_LOG(ERR, "Cannot obtain functionality id"); + return -EINVAL; + } + alg_bytesize = qat_function.bytesize; + + if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) { + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + SET_PKE_LN(cookie->input_array, asym_op->rsa.message, + alg_bytesize, 0); + break; + default: + QAT_LOG(ERR, + "Invalid RSA padding (Encryption)" + ); + return -EINVAL; } + HEXDUMP("RSA Message", cookie->input_array[0], alg_bytesize); + } else { + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + SET_PKE_LN(cookie->input_array, asym_op->rsa.sign, + alg_bytesize, 0); + break; + default: + QAT_LOG(ERR, + "Invalid RSA padding (Verify)"); + return -EINVAL; + } + HEXDUMP("RSA Signature", cookie->input_array[0], + alg_bytesize); + } - alg_size_in_bytes = alg_size >> 3; - rte_memcpy(cookie->input_array[0] + alg_size_in_bytes - - asym_op->modex.base.length - , asym_op->modex.base.data, - asym_op->modex.base.length); - rte_memcpy(cookie->input_array[1] + alg_size_in_bytes - - xform->modex.exponent.length - , xform->modex.exponent.data, - xform->modex.exponent.length); - rte_memcpy(cookie->input_array[2] + alg_size_in_bytes - - xform->modex.modulus.length, - xform->modex.modulus.data, - xform->modex.modulus.length); - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; - qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS; - qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base", - cookie->input_array[0], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent", - cookie->input_array[1], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, " ModExpmodulus", - cookie->input_array[2], - alg_size_in_bytes); -#endif - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { - err = qat_asym_check_nonzero(xform->modinv.modulus); - if (err) { - QAT_LOG(ERR, "Empty modulus in modular multiplicative" - " inverse, aborting this operation"); - return err; + SET_PKE_LN(cookie->input_array, xform->rsa.e, + alg_bytesize, 1); + SET_PKE_LN(cookie->input_array, xform->rsa.n, + alg_bytesize, 2); + + cookie->alg_bytesize = alg_bytesize; + qat_req->pke_hdr.cd_pars.func_id = func_id; + + HEXDUMP("RSA Public Key", cookie->input_array[1], alg_bytesize); + HEXDUMP("RSA Modulus", cookie->input_array[2], alg_bytesize); + + return status; +} + +static int +rsa_set_priv_input(struct rte_crypto_asym_op *asym_op, + struct icp_qat_fw_pke_request *qat_req, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + struct qat_asym_function qat_function; + uint32_t alg_bytesize, func_id; + int status = 0; + + if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) { + qat_function = get_rsa_crt_function(xform); + func_id = 
qat_function.func_id; + if (func_id == 0) { + QAT_LOG(ERR, "Cannot obtain functionality id"); + return -EINVAL; + } + alg_bytesize = qat_function.bytesize; + qat_req->input_param_count = + QAT_ASYM_RSA_QT_NUM_IN_PARAMS; + + SET_PKE_LN(cookie->input_array, xform->rsa.qt.p, + (alg_bytesize >> 1), 1); + SET_PKE_LN(cookie->input_array, xform->rsa.qt.q, + (alg_bytesize >> 1), 2); + SET_PKE_LN(cookie->input_array, xform->rsa.qt.dP, + (alg_bytesize >> 1), 3); + SET_PKE_LN(cookie->input_array, xform->rsa.qt.dQ, + (alg_bytesize >> 1), 4); + SET_PKE_LN(cookie->input_array, xform->rsa.qt.qInv, + (alg_bytesize >> 1), 5); + + HEXDUMP("RSA p", cookie->input_array[1], + alg_bytesize); + HEXDUMP("RSA q", cookie->input_array[2], + alg_bytesize); + HEXDUMP("RSA dP", cookie->input_array[3], + alg_bytesize); + HEXDUMP("RSA dQ", cookie->input_array[4], + alg_bytesize); + HEXDUMP("RSA qInv", cookie->input_array[5], + alg_bytesize); + } else if (xform->rsa.key_type == + RTE_RSA_KEY_TYPE_EXP) { + qat_function = get_rsa_dec_function(xform); + func_id = qat_function.func_id; + if (func_id == 0) { + QAT_LOG(ERR, "Cannot obtain functionality id"); + return -EINVAL; } + alg_bytesize = qat_function.bytesize; - alg_size_in_bytes = max_of(2, asym_op->modinv.base.length, - xform->modinv.modulus.length); - alg_size = alg_size_in_bytes << 3; - - if (xform->modinv.modulus.data[ - xform->modinv.modulus.length - 1] & 0x01) { - if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD, - sizeof(MOD_INV_IDS_ODD)/ - sizeof(*MOD_INV_IDS_ODD), - &alg_size, &func_id)) { - return -(EINVAL); - } - } else { - if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN, - sizeof(MOD_INV_IDS_EVEN)/ - sizeof(*MOD_INV_IDS_EVEN), - &alg_size, &func_id)) { - return -(EINVAL); - } + SET_PKE_LN(cookie->input_array, xform->rsa.d, + alg_bytesize, 1); + SET_PKE_LN(cookie->input_array, xform->rsa.n, + alg_bytesize, 2); + + HEXDUMP("RSA d", cookie->input_array[1], + alg_bytesize); + HEXDUMP("RSA n", cookie->input_array[2], + alg_bytesize); + } else { + QAT_LOG(ERR, "Invalid RSA key type"); + return -EINVAL; + } + + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_DECRYPT) { + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + SET_PKE_LN(cookie->input_array, asym_op->rsa.cipher, + alg_bytesize, 0); + HEXDUMP("RSA ciphertext", cookie->input_array[0], + alg_bytesize); + break; + default: + QAT_LOG(ERR, + "Invalid padding of RSA (Decrypt)"); + return -(EINVAL); } - alg_size_in_bytes = alg_size >> 3; - rte_memcpy(cookie->input_array[0] + alg_size_in_bytes - - asym_op->modinv.base.length - , asym_op->modinv.base.data, - asym_op->modinv.base.length); - rte_memcpy(cookie->input_array[1] + alg_size_in_bytes - - xform->modinv.modulus.length - , xform->modinv.modulus.data, - xform->modinv.modulus.length); - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; - qat_req->input_param_count = - QAT_ASYM_MODINV_NUM_IN_PARAMS; - qat_req->output_param_count = - QAT_ASYM_MODINV_NUM_OUT_PARAMS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base", - cookie->input_array[0], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus", - cookie->input_array[1], - alg_size_in_bytes); -#endif - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { - err = qat_asym_check_nonzero(xform->rsa.n); - if (err) { - QAT_LOG(ERR, "Empty modulus in RSA" - " inverse, aborting this operation"); - return err; + } else if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_SIGN) { + switch (asym_op->rsa.pad) { + case 
RTE_CRYPTO_RSA_PADDING_NONE: + SET_PKE_LN(cookie->input_array, asym_op->rsa.message, + alg_bytesize, 0); + HEXDUMP("RSA text to be signed", cookie->input_array[0], + alg_bytesize); + break; + default: + QAT_LOG(ERR, + "Invalid padding of RSA (Signature)"); + return -(EINVAL); } + } - alg_size_in_bytes = xform->rsa.n.length; - alg_size = alg_size_in_bytes << 3; + cookie->alg_bytesize = alg_bytesize; + qat_req->pke_hdr.cd_pars.func_id = func_id; + return status; +} - qat_req->input_param_count = - QAT_ASYM_RSA_NUM_IN_PARAMS; - qat_req->output_param_count = - QAT_ASYM_RSA_NUM_OUT_PARAMS; - - if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || - asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_VERIFY) { - - if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS, - sizeof(RSA_ENC_IDS)/ - sizeof(*RSA_ENC_IDS), - &alg_size, &func_id)) { - err = -(EINVAL); - QAT_LOG(ERR, - "Not supported RSA parameter size (key)"); - return err; - } - alg_size_in_bytes = alg_size >> 3; - if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) { - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(cookie->input_array[0] + - alg_size_in_bytes - - asym_op->rsa.message.length - , asym_op->rsa.message.data, - asym_op->rsa.message.length); - break; - default: - err = -(EINVAL); - QAT_LOG(ERR, - "Invalid RSA padding (Encryption)"); - return err; - } -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message", - cookie->input_array[0], - alg_size_in_bytes); -#endif - } else { - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(cookie->input_array[0], - asym_op->rsa.sign.data, - alg_size_in_bytes); - break; - default: - err = -(EINVAL); - QAT_LOG(ERR, - "Invalid RSA padding (Verify)"); - return err; - } +static int +rsa_set_input(struct rte_crypto_asym_op *asym_op, + struct icp_qat_fw_pke_request *qat_req, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + qat_req->input_param_count = + QAT_ASYM_RSA_NUM_IN_PARAMS; + qat_req->output_param_count = + QAT_ASYM_RSA_NUM_OUT_PARAMS; + + if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || + asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) { + return rsa_set_pub_input(asym_op, qat_req, cookie, xform); + } else { + return rsa_set_priv_input(asym_op, qat_req, cookie, xform); + } +} -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, " RSA Signature", - cookie->input_array[0], - alg_size_in_bytes); -#endif +static uint8_t +rsa_collect(struct rte_crypto_asym_op *asym_op, + struct qat_asym_op_cookie *cookie) +{ + uint32_t alg_bytesize = cookie->alg_bytesize; - } - rte_memcpy(cookie->input_array[1] + - alg_size_in_bytes - - xform->rsa.e.length - , xform->rsa.e.data, - xform->rsa.e.length); - rte_memcpy(cookie->input_array[2] + - alg_size_in_bytes - - xform->rsa.n.length, - xform->rsa.n.data, - xform->rsa.n.length); - - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; + if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || + asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) { -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key", - cookie->input_array[1], alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus", - cookie->input_array[2], alg_size_in_bytes); -#endif - } else { - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_DECRYPT) { - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(cookie->input_array[0] - + alg_size_in_bytes - - asym_op->rsa.cipher.length, - 
asym_op->rsa.cipher.data, - asym_op->rsa.cipher.length); - break; - default: - QAT_LOG(ERR, - "Invalid padding of RSA (Decrypt)"); - return -(EINVAL); - } - - } else if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_SIGN) { - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(cookie->input_array[0] - + alg_size_in_bytes - - asym_op->rsa.message.length, - asym_op->rsa.message.data, - asym_op->rsa.message.length); - break; - default: - QAT_LOG(ERR, - "Invalid padding of RSA (Signature)"); - return -(EINVAL); - } - } - if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) { - - qat_req->input_param_count = - QAT_ASYM_RSA_QT_NUM_IN_PARAMS; - if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS, - sizeof(RSA_DEC_CRT_IDS)/ - sizeof(*RSA_DEC_CRT_IDS), - &alg_size, &func_id)) { - return -(EINVAL); - } - alg_size_in_bytes = alg_size >> 3; - - rte_memcpy(cookie->input_array[1] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.p.length - , xform->rsa.qt.p.data, - xform->rsa.qt.p.length); - rte_memcpy(cookie->input_array[2] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.q.length - , xform->rsa.qt.q.data, - xform->rsa.qt.q.length); - rte_memcpy(cookie->input_array[3] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.dP.length - , xform->rsa.qt.dP.data, - xform->rsa.qt.dP.length); - rte_memcpy(cookie->input_array[4] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.dQ.length - , xform->rsa.qt.dQ.data, - xform->rsa.qt.dQ.length); - rte_memcpy(cookie->input_array[5] + - (alg_size_in_bytes >> 1) - - xform->rsa.qt.qInv.length - , xform->rsa.qt.qInv.data, - xform->rsa.qt.qInv.length); - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_ENCRYPT) { + uint8_t *rsa_result = asym_op->rsa.cipher.data; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "C", - cookie->input_array[0], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "p", - cookie->input_array[1], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "q", - cookie->input_array[2], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, - "dP", cookie->input_array[3], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, - "dQ", cookie->input_array[4], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, - "qInv", cookie->input_array[5], - alg_size_in_bytes); -#endif - } else if (xform->rsa.key_type == - RTE_RSA_KEY_TYPE_EXP) { - if (qat_asym_get_sz_and_func_id( - RSA_DEC_IDS, - sizeof(RSA_DEC_IDS)/ - sizeof(*RSA_DEC_IDS), - &alg_size, &func_id)) { - return -(EINVAL); - } - alg_size_in_bytes = alg_size >> 3; - rte_memcpy(cookie->input_array[1] + - alg_size_in_bytes - - xform->rsa.d.length, - xform->rsa.d.data, - xform->rsa.d.length); - rte_memcpy(cookie->input_array[2] + - alg_size_in_bytes - - xform->rsa.n.length, - xform->rsa.n.data, - xform->rsa.n.length); -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext", - cookie->input_array[0], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d", cookie->input_array[1], - alg_size_in_bytes); - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n", cookie->input_array[2], - alg_size_in_bytes); -#endif + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_bytesize); + HEXDUMP("RSA Encrypted data", cookie->output_array[0], + alg_bytesize); + } else { + uint8_t *rsa_result = asym_op->rsa.cipher.data; - cookie->alg_size = alg_size; - qat_req->pke_hdr.cd_pars.func_id = func_id; - } else { - QAT_LOG(ERR, "Invalid RSA key type"); - return -(EINVAL); + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + 
rte_memcpy(rsa_result, + cookie->output_array[0], + alg_bytesize); + HEXDUMP("RSA signature", + cookie->output_array[0], + alg_bytesize); + break; + default: + QAT_LOG(ERR, "Padding not supported"); + return RTE_CRYPTO_OP_STATUS_ERROR; } } } else { - QAT_LOG(ERR, "Invalid asymmetric crypto xform"); - return -(EINVAL); + if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) { + uint8_t *rsa_result = asym_op->rsa.message.data; + + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_bytesize); + HEXDUMP("RSA Decrypted Message", + cookie->output_array[0], + alg_bytesize); + break; + default: + QAT_LOG(ERR, "Padding not supported"); + return RTE_CRYPTO_OP_STATUS_ERROR; + } + } else { + uint8_t *rsa_result = asym_op->rsa.sign.data; + + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_bytesize); + HEXDUMP("RSA Signature", cookie->output_array[0], + alg_bytesize); + } } - return 0; + return RTE_CRYPTO_OP_STATUS_SUCCESS; +} + + +static int +asym_set_input(struct rte_crypto_asym_op *asym_op, + struct icp_qat_fw_pke_request *qat_req, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + switch (xform->xform_type) { + case RTE_CRYPTO_ASYM_XFORM_MODEX: + return modexp_set_input(asym_op, qat_req, + cookie, xform); + case RTE_CRYPTO_ASYM_XFORM_MODINV: + return modinv_set_input(asym_op, qat_req, + cookie, xform); + case RTE_CRYPTO_ASYM_XFORM_RSA: + return rsa_set_input(asym_op, qat_req, + cookie, xform); + default: + QAT_LOG(ERR, "Invalid/unsupported asymmetric crypto xform"); + return -EINVAL; + } + return 1; } int -qat_asym_build_request(void *in_op, - uint8_t *out_msg, - void *op_cookie, +qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen) { - struct qat_asym_session *ctx; struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; struct rte_crypto_asym_op *asym_op = op->asym; struct icp_qat_fw_pke_request *qat_req = (struct icp_qat_fw_pke_request *)out_msg; struct qat_asym_op_cookie *cookie = - (struct qat_asym_op_cookie *)op_cookie; + (struct qat_asym_op_cookie *)op_cookie; int err = 0; op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - ctx = (struct qat_asym_session *) - get_asym_session_private_data( - op->asym->session, qat_asym_driver_id); - if (unlikely(ctx == NULL)) { - QAT_LOG(ERR, "Session has not been created for this device"); - goto error; - } - rte_mov64((uint8_t *)qat_req, (const uint8_t *)&(ctx->req_tmpl)); - err = qat_asym_fill_arrays(asym_op, qat_req, cookie, ctx->xform); - if (err) { - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - goto error; - } - } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - qat_fill_req_tmpl(qat_req); - err = qat_asym_fill_arrays(asym_op, qat_req, cookie, + switch (op->sess_type) { + case RTE_CRYPTO_OP_WITH_SESSION: + QAT_LOG(ERR, + "QAT asymmetric crypto PMD does not support session" + ); + goto error; + case RTE_CRYPTO_OP_SESSIONLESS: + request_init(qat_req); + err = asym_set_input(asym_op, qat_req, cookie, op->asym->xform); if (err) { op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; goto error; } - } else { + break; + default: QAT_DP_LOG(ERR, "Invalid session/xform settings"); op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; goto error; @@ -522,21 +570,12 @@ qat_asym_build_request(void *in_op, qat_req->pke_mid.src_data_addr = cookie->input_addr; qat_req->pke_mid.dest_data_addr = cookie->output_addr; -#if RTE_LOG_DP_LEVEL >= 
RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, - sizeof(struct icp_qat_fw_pke_request)); -#endif + HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request)); return 0; error: - qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op; - -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req, - sizeof(struct icp_qat_fw_pke_request)); -#endif - + HEXDUMP("qat_req:", qat_req, sizeof(struct icp_qat_fw_pke_request)); qat_req->output_param_count = 0; qat_req->input_param_count = 0; qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL; @@ -545,144 +584,30 @@ qat_asym_build_request(void *in_op, return 0; } -static void qat_asym_collect_response(struct rte_crypto_op *rx_op, +static uint8_t +qat_asym_collect_response(struct rte_crypto_op *rx_op, struct qat_asym_op_cookie *cookie, struct rte_crypto_asym_xform *xform) { - size_t alg_size, alg_size_in_bytes = 0; struct rte_crypto_asym_op *asym_op = rx_op->asym; - if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { - rte_crypto_param n = xform->modex.modulus; - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - uint8_t *modexp_result = asym_op->modex.result.data; - - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { - rte_memcpy(modexp_result + - (asym_op->modex.result.length - - n.length), - cookie->output_array[0] + alg_size_in_bytes - - n.length, n.length - ); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result", - cookie->output_array[0], - alg_size_in_bytes); - -#endif - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { - rte_crypto_param n = xform->modinv.modulus; - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - uint8_t *modinv_result = asym_op->modinv.result.data; - - if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { - rte_memcpy(modinv_result + (asym_op->modinv.result.length - - n.length), - cookie->output_array[0] + alg_size_in_bytes - - n.length, n.length); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { - - alg_size = cookie->alg_size; - alg_size_in_bytes = alg_size >> 3; - if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || - asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_VERIFY) { - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_ENCRYPT) { - uint8_t *rsa_result = asym_op->rsa.cipher.data; - - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } else if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_VERIFY) { - uint8_t *rsa_result = asym_op->rsa.cipher.data; - - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = - RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", - cookie->output_array[0], - alg_size_in_bytes); -#endif - break; - default: - QAT_LOG(ERR, "Padding not supported"); - rx_op->status = - RTE_CRYPTO_OP_STATUS_ERROR; - break; - } - } - } else { - if (asym_op->rsa.op_type == - RTE_CRYPTO_ASYM_OP_DECRYPT) { - uint8_t *rsa_result = 
asym_op->rsa.message.data; - - switch (asym_op->rsa.pad) { - case RTE_CRYPTO_RSA_PADDING_NONE: - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = - RTE_CRYPTO_OP_STATUS_SUCCESS; - break; - default: - QAT_LOG(ERR, "Padding not supported"); - rx_op->status = - RTE_CRYPTO_OP_STATUS_ERROR; - break; - } -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message", - rsa_result, alg_size_in_bytes); -#endif - } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) { - uint8_t *rsa_result = asym_op->rsa.sign.data; - - rte_memcpy(rsa_result, - cookie->output_array[0], - alg_size_in_bytes); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", - cookie->output_array[0], - alg_size_in_bytes); -#endif - } - } + switch (xform->xform_type) { + case RTE_CRYPTO_ASYM_XFORM_MODEX: + return modexp_collect(asym_op, cookie, xform); + case RTE_CRYPTO_ASYM_XFORM_MODINV: + return modinv_collect(asym_op, cookie, xform); + case RTE_CRYPTO_ASYM_XFORM_RSA: + return rsa_collect(asym_op, cookie); + default: + QAT_LOG(ERR, "Not supported xform type"); + return RTE_CRYPTO_OP_STATUS_ERROR; } - qat_clear_arrays_by_alg(cookie, xform, alg_size_in_bytes); } void qat_asym_process_response(void **op, uint8_t *resp, void *op_cookie) { - struct qat_asym_session *ctx; struct icp_qat_fw_pke_resp *resp_msg = (struct icp_qat_fw_pke_resp *)resp; struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) @@ -709,99 +634,37 @@ qat_asym_process_response(void **op, uint8_t *resp, " returned error"); } } - - if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - ctx = (struct qat_asym_session *)get_asym_session_private_data( - rx_op->asym->session, qat_asym_driver_id); - qat_asym_collect_response(rx_op, cookie, ctx->xform); - } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform); + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { + rx_op->status = qat_asym_collect_response(rx_op, + cookie, rx_op->asym->xform); + cleanup(cookie, rx_op->asym->xform, + cookie->alg_bytesize); } - *op = rx_op; -#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG - QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg, - sizeof(struct icp_qat_fw_pke_resp)); -#endif + *op = rx_op; + HEXDUMP("resp_msg:", resp_msg, sizeof(struct icp_qat_fw_pke_resp)); } int -qat_asym_session_configure(struct rte_cryptodev *dev, - struct rte_crypto_asym_xform *xform, - struct rte_cryptodev_asym_session *sess, - struct rte_mempool *mempool) +qat_asym_session_configure(struct rte_cryptodev *dev __rte_unused, + struct rte_crypto_asym_xform *xform __rte_unused, + struct rte_cryptodev_asym_session *sess __rte_unused, + struct rte_mempool *mempool __rte_unused) { - int err = 0; - void *sess_private_data; - struct qat_asym_session *session; - - if (rte_mempool_get(mempool, &sess_private_data)) { - QAT_LOG(ERR, - "Couldn't get object from session mempool"); - return -ENOMEM; - } - - session = sess_private_data; - if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { - if (xform->modex.exponent.length == 0 || - xform->modex.modulus.length == 0) { - QAT_LOG(ERR, "Invalid mod exp input parameter"); - err = -EINVAL; - goto error; - } - } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { - if (xform->modinv.modulus.length == 0) { - QAT_LOG(ERR, "Invalid mod inv input parameter"); - err = -EINVAL; - goto error; - } - } else if (xform->xform_type == 
RTE_CRYPTO_ASYM_XFORM_RSA) { - if (xform->rsa.n.length == 0) { - QAT_LOG(ERR, "Invalid rsa input parameter"); - err = -EINVAL; - goto error; - } - } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END - || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) { - QAT_LOG(ERR, "Invalid asymmetric crypto xform"); - err = -EINVAL; - goto error; - } else { - QAT_LOG(ERR, "Asymmetric crypto xform not implemented"); - err = -EINVAL; - goto error; - } - - session->xform = xform; - qat_asym_build_req_tmpl(sess_private_data); - set_asym_session_private_data(sess, dev->driver_id, - sess_private_data); - - return 0; -error: - rte_mempool_put(mempool, sess_private_data); - return err; + QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session"); + return -ENOTSUP; } -unsigned int qat_asym_session_get_private_size( - struct rte_cryptodev *dev __rte_unused) +unsigned int +qat_asym_session_get_private_size(struct rte_cryptodev *dev __rte_unused) { - return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8); + QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session"); + return 0; } void -qat_asym_session_clear(struct rte_cryptodev *dev, - struct rte_cryptodev_asym_session *sess) +qat_asym_session_clear(struct rte_cryptodev *dev __rte_unused, + struct rte_cryptodev_asym_session *sess __rte_unused) { - uint8_t index = dev->driver_id; - void *sess_priv = get_asym_session_private_data(sess, index); - struct qat_asym_session *s = (struct qat_asym_session *)sess_priv; - - if (sess_priv) { - memset(s, 0, qat_asym_session_get_private_size(dev)); - struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - - set_asym_session_private_data(sess, index, NULL); - rte_mempool_put(sess_mp, sess_priv); - } + QAT_LOG(ERR, "QAT asymmetric PMD currently does not support session"); } diff --git a/drivers/crypto/qat/qat_asym.h b/drivers/crypto/qat/qat_asym.h index 308b6b2e0b..ed4367ac4d 100644 --- a/drivers/crypto/qat/qat_asym.h +++ b/drivers/crypto/qat/qat_asym.h @@ -27,7 +27,7 @@ typedef uint64_t large_int_ptr; #define QAT_ASYM_RSA_QT_NUM_IN_PARAMS 6 struct qat_asym_op_cookie { - size_t alg_size; + size_t alg_bytesize; uint64_t error; rte_iova_t input_addr; rte_iova_t output_addr; -- 2.13.6