From mboxrd@z Thu Jan 1 00:00:00 1970
From: Kai Ji <kai.ji@intel.com>
To: dev@dpdk.org
Cc: Kai Ji <kai.ji@intel.com>
Date: Mon, 1 Nov 2021 23:12:53 +0000
Message-Id: <20211101231257.7629-4-kai.ji@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20211101231257.7629-1-kai.ji@intel.com>
References: <20211026172518.20183-1-kai.ji@intel.com>
 <20211101231257.7629-1-kai.ji@intel.com>
Subject: [dpdk-dev] [dpdk-dev v2 3/7] crypto/qat: qat driver asym op refactor

This patch adds the refactored QAT asymmetric crypto build request
function implementation (qat_asym_refactor.c & qat_asym_refactor.h).

Signed-off-by: Kai Ji <kai.ji@intel.com>
Acked-by: Fan Zhang
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c |   7 +
 drivers/crypto/qat/qat_asym_refactor.c     | 994 +++++++++++++++++++++
 drivers/crypto/qat/qat_asym_refactor.h     | 125 +++
 3 files changed, 1126 insertions(+)
 create mode 100644 drivers/crypto/qat/qat_asym_refactor.c
 create mode 100644 drivers/crypto/qat/qat_asym_refactor.h

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..99d90fa56c 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/qat_asym_refactor.c b/drivers/crypto/qat/qat_asym_refactor.c
new file mode 100644
index 0000000000..8e789920cb
--- /dev/null
+++ b/drivers/crypto/qat/qat_asym_refactor.c
@@ -0,0 +1,994 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 - 2021 Intel Corporation
+ */
+
+
+#include <stdarg.h>
+
+#include <cryptodev_pmd.h>
+
+#include "icp_qat_fw_pke.h"
+#include "icp_qat_fw.h"
+#include "qat_pke_functionality_arrays.h"
+
+#include "qat_device.h"
+
+#include "qat_logs.h"
+#include "qat_asym.h"
+
+uint8_t qat_asym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[QAT_N_GENS];
+
+void
+qat_asym_init_op_cookie(void *op_cookie)
+{
+	int j;
+	struct qat_asym_op_cookie *cookie = op_cookie;
+
+	cookie->input_addr = rte_mempool_virt2iova(cookie) +
+			offsetof(struct qat_asym_op_cookie,
+					input_params_ptrs);
+
+	cookie->output_addr = 
rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + output_params_ptrs); + + for (j = 0; j < 8; j++) { + cookie->input_params_ptrs[j] = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + input_array[j]); + cookie->output_params_ptrs[j] = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_asym_op_cookie, + output_array[j]); + } +} + +int +qat_asym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_asym_xform *xform, + struct rte_cryptodev_asym_session *sess, + struct rte_mempool *mempool) +{ + int err = 0; + void *sess_private_data; + struct qat_asym_session *session; + + if (rte_mempool_get(mempool, &sess_private_data)) { + QAT_LOG(ERR, + "Couldn't get object from session mempool"); + return -ENOMEM; + } + + session = sess_private_data; + if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { + if (xform->modex.exponent.length == 0 || + xform->modex.modulus.length == 0) { + QAT_LOG(ERR, "Invalid mod exp input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { + if (xform->modinv.modulus.length == 0) { + QAT_LOG(ERR, "Invalid mod inv input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { + if (xform->rsa.n.length == 0) { + QAT_LOG(ERR, "Invalid rsa input parameter"); + err = -EINVAL; + goto error; + } + } else if (xform->xform_type >= RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END + || xform->xform_type <= RTE_CRYPTO_ASYM_XFORM_NONE) { + QAT_LOG(ERR, "Invalid asymmetric crypto xform"); + err = -EINVAL; + goto error; + } else { + QAT_LOG(ERR, "Asymmetric crypto xform not implemented"); + err = -EINVAL; + goto error; + } + + session->xform = xform; + qat_asym_build_req_tmpl(sess_private_data); + set_asym_session_private_data(sess, dev->driver_id, + sess_private_data); + + return 0; +error: + rte_mempool_put(mempool, sess_private_data); + return err; +} + +unsigned int +qat_asym_session_get_private_size( + struct rte_cryptodev *dev __rte_unused) +{ + return RTE_ALIGN_CEIL(sizeof(struct qat_asym_session), 8); +} + +void +qat_asym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_asym_session *sess) +{ + uint8_t index = dev->driver_id; + void *sess_priv = get_asym_session_private_data(sess, index); + struct qat_asym_session *s = (struct qat_asym_session *)sess_priv; + + if (sess_priv) { + memset(s, 0, qat_asym_session_get_private_size(dev)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); + + set_asym_session_private_data(sess, index, NULL); + rte_mempool_put(sess_mp, sess_priv); + } +} + +/* An rte_driver is needed in the registration of both the device and the driver + * with cryptodev. + * The actual qat pci's rte_driver can't be used as its name represents + * the whole pci device with all services. Think of this as a holder for a name + * for the crypto part of the pci device. 
+ */ +static const char qat_asym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_ASYM_PMD); +static const struct rte_driver cryptodev_qat_asym_driver = { + .name = qat_asym_drv_name, + .alias = qat_asym_drv_name +}; + + +static void +qat_clear_arrays(struct qat_asym_op_cookie *cookie, + int in_count, int out_count, int in_size, int out_size) +{ + int i; + + for (i = 0; i < in_count; i++) + memset(cookie->input_array[i], 0x0, in_size); + for (i = 0; i < out_count; i++) + memset(cookie->output_array[i], 0x0, out_size); +} + +static void +qat_clear_arrays_by_alg(struct qat_asym_op_cookie *cookie, + enum rte_crypto_asym_xform_type alg, int in_size, int out_size) +{ + if (alg == RTE_CRYPTO_ASYM_XFORM_MODEX) + qat_clear_arrays(cookie, QAT_ASYM_MODEXP_NUM_IN_PARAMS, + QAT_ASYM_MODEXP_NUM_OUT_PARAMS, in_size, + out_size); + else if (alg == RTE_CRYPTO_ASYM_XFORM_MODINV) + qat_clear_arrays(cookie, QAT_ASYM_MODINV_NUM_IN_PARAMS, + QAT_ASYM_MODINV_NUM_OUT_PARAMS, in_size, + out_size); +} + +static void +qat_asym_collect_response(struct rte_crypto_op *rx_op, + struct qat_asym_op_cookie *cookie, + struct rte_crypto_asym_xform *xform) +{ + size_t alg_size, alg_size_in_bytes = 0; + struct rte_crypto_asym_op *asym_op = rx_op->asym; + + if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) { + rte_crypto_param n = xform->modex.modulus; + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + uint8_t *modexp_result = asym_op->modex.result.data; + + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { + rte_memcpy(modexp_result + + (asym_op->modex.result.length - + n.length), + cookie->output_array[0] + alg_size_in_bytes + - n.length, n.length + ); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp result", + cookie->output_array[0], + alg_size_in_bytes); + +#endif + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) { + rte_crypto_param n = xform->modinv.modulus; + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + uint8_t *modinv_result = asym_op->modinv.result.data; + + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) { + rte_memcpy(modinv_result + + (asym_op->modinv.result.length - n.length), + cookie->output_array[0] + alg_size_in_bytes + - n.length, n.length); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv result", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } + } else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) { + + alg_size = cookie->alg_size; + alg_size_in_bytes = alg_size >> 3; + if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT || + asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) { + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_ENCRYPT) { + uint8_t *rsa_result = asym_op->rsa.cipher.data; + + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Encrypted data", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } else if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) { + uint8_t *rsa_result = asym_op->rsa.cipher.data; + + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = + RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", + 
cookie->output_array[0], + alg_size_in_bytes); +#endif + break; + default: + QAT_LOG(ERR, "Padding not supported"); + rx_op->status = + RTE_CRYPTO_OP_STATUS_ERROR; + break; + } + } + } else { + if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_DECRYPT) { + uint8_t *rsa_result = asym_op->rsa.message.data; + + switch (asym_op->rsa.pad) { + case RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + break; + default: + QAT_LOG(ERR, "Padding not supported"); + rx_op->status = + RTE_CRYPTO_OP_STATUS_ERROR; + break; + } +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Decrypted Message", + rsa_result, alg_size_in_bytes); +#endif + } else if (asym_op->rsa.op_type == + RTE_CRYPTO_ASYM_OP_SIGN) { + uint8_t *rsa_result = asym_op->rsa.sign.data; + + rte_memcpy(rsa_result, + cookie->output_array[0], + alg_size_in_bytes); + rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature", + cookie->output_array[0], + alg_size_in_bytes); +#endif + } + } + } + qat_clear_arrays_by_alg(cookie, xform->xform_type, alg_size_in_bytes, + alg_size_in_bytes); +} + +int +qat_asym_process_response(void __rte_unused * *op, uint8_t *resp, + void *op_cookie, __rte_unused uint64_t *dequeue_err_count) +{ + struct qat_asym_session *ctx; + struct icp_qat_fw_pke_resp *resp_msg = + (struct icp_qat_fw_pke_resp *)resp; + struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) + (resp_msg->opaque); + struct qat_asym_op_cookie *cookie = op_cookie; + + if (cookie->error) { + cookie->error = 0; + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Cookie status returned error"); + } else { + if (ICP_QAT_FW_PKE_RESP_PKE_STAT_GET( + resp_msg->pke_resp_hdr.resp_status.pke_resp_flags)) { + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Asymmetric response status" + " returned error"); + } + if (resp_msg->pke_resp_hdr.resp_status.comn_err_code) { + if (rx_op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + rx_op->status = RTE_CRYPTO_OP_STATUS_ERROR; + QAT_DP_LOG(ERR, "Asymmetric common status" + " returned error"); + } + } + + if (rx_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { + ctx = (struct qat_asym_session *)get_asym_session_private_data( + rx_op->asym->session, qat_asym_driver_id); + qat_asym_collect_response(rx_op, cookie, ctx->xform); + } else if (rx_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { + qat_asym_collect_response(rx_op, cookie, rx_op->asym->xform); + } + *op = rx_op; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "resp_msg:", resp_msg, + sizeof(struct icp_qat_fw_pke_resp)); +#endif + + return 1; +} + +#define qat_asym_sz_2param(arg) (arg, sizeof(arg)/sizeof(*arg)) + +static int +qat_asym_get_sz_and_func_id(const uint32_t arr[][2], + size_t arr_sz, size_t *size, uint32_t *func_id) +{ + size_t i; + + for (i = 0; i < arr_sz; i++) { + if (*size <= arr[i][0]) { + *size = arr[i][0]; + *func_id = arr[i][1]; + return 0; + } + } + return -1; +} + +static size_t +max_of(int n, ...) 
+{
+	va_list args;
+	size_t len = 0, num;
+	int i;
+
+	va_start(args, n);
+	len = va_arg(args, size_t);
+
+	for (i = 0; i < n - 1; i++) {
+		num = va_arg(args, size_t);
+		if (num > len)
+			len = num;
+	}
+	va_end(args);
+
+	return len;
+}
+
+static int
+qat_asym_check_nonzero(rte_crypto_param n)
+{
+	if (n.length < 8) {
+		/* Not a case for any cryptographic function except for DH
+		 * generator, which can very often be one byte long
+		 */
+		size_t i;
+
+		if (n.data[n.length - 1] == 0x0) {
+			for (i = 0; i < n.length - 1; i++)
+				if (n.data[i] != 0x0)
+					break;
+			if (i == n.length - 1)
+				return -(EINVAL);
+		}
+	} else if (*(uint64_t *)&n.data[
+				n.length - 8] == 0) {
+		/* Very likely it is zeroed modulus */
+		size_t i;
+
+		for (i = 0; i < n.length - 8; i++)
+			if (n.data[i] != 0x0)
+				break;
+		if (i == n.length - 8)
+			return -(EINVAL);
+	}
+
+	return 0;
+}
+
+static int
+qat_asym_fill_arrays(struct rte_crypto_asym_op *asym_op,
+		struct icp_qat_fw_pke_request *qat_req,
+		struct qat_asym_op_cookie *cookie,
+		struct rte_crypto_asym_xform *xform)
+{
+	int err = 0;
+	size_t alg_size;
+	size_t alg_size_in_bytes;
+	uint32_t func_id = 0;
+
+	if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODEX) {
+		err = qat_asym_check_nonzero(xform->modex.modulus);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in modular exponentiation,"
+					" aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = max_of(3, asym_op->modex.base.length,
+				xform->modex.exponent.length,
+				xform->modex.modulus.length);
+		alg_size = alg_size_in_bytes << 3;
+
+		if (qat_asym_get_sz_and_func_id(MOD_EXP_SIZE,
+				sizeof(MOD_EXP_SIZE)/sizeof(*MOD_EXP_SIZE),
+				&alg_size, &func_id)) {
+			return -(EINVAL);
+		}
+
+		alg_size_in_bytes = alg_size >> 3;
+		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
+			asym_op->modex.base.length
+			, asym_op->modex.base.data,
+			asym_op->modex.base.length);
+		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
+			xform->modex.exponent.length
+			, xform->modex.exponent.data,
+			xform->modex.exponent.length);
+		rte_memcpy(cookie->input_array[2] + alg_size_in_bytes -
+			xform->modex.modulus.length,
+			xform->modex.modulus.data,
+			xform->modex.modulus.length);
+		cookie->alg_size = alg_size;
+		qat_req->pke_hdr.cd_pars.func_id = func_id;
+		qat_req->input_param_count = QAT_ASYM_MODEXP_NUM_IN_PARAMS;
+		qat_req->output_param_count = QAT_ASYM_MODEXP_NUM_OUT_PARAMS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp base",
+				cookie->input_array[0],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp exponent",
+				cookie->input_array[1],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModExp modulus",
+				cookie->input_array[2],
+				alg_size_in_bytes);
+#endif
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_MODINV) {
+		err = qat_asym_check_nonzero(xform->modinv.modulus);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in modular multiplicative"
+					" inverse, aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = max_of(2, asym_op->modinv.base.length,
+				xform->modinv.modulus.length);
+		alg_size = alg_size_in_bytes << 3;
+
+		if (xform->modinv.modulus.data[
+				xform->modinv.modulus.length - 1] & 0x01) {
+			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_ODD,
+					sizeof(MOD_INV_IDS_ODD)/
+					sizeof(*MOD_INV_IDS_ODD),
+					&alg_size, &func_id)) {
+				return -(EINVAL);
+			}
+		} else {
+			if (qat_asym_get_sz_and_func_id(MOD_INV_IDS_EVEN,
+					sizeof(MOD_INV_IDS_EVEN)/
+					sizeof(*MOD_INV_IDS_EVEN),
+					&alg_size, &func_id)) {
+				return -(EINVAL);
+			}
+		}
+
+		alg_size_in_bytes = alg_size >> 3;
+		rte_memcpy(cookie->input_array[0] + alg_size_in_bytes -
+			asym_op->modinv.base.length
+			, asym_op->modinv.base.data,
+			asym_op->modinv.base.length);
+		rte_memcpy(cookie->input_array[1] + alg_size_in_bytes -
+			xform->modinv.modulus.length
+			, xform->modinv.modulus.data,
+			xform->modinv.modulus.length);
+		cookie->alg_size = alg_size;
+		qat_req->pke_hdr.cd_pars.func_id = func_id;
+		qat_req->input_param_count =
+				QAT_ASYM_MODINV_NUM_IN_PARAMS;
+		qat_req->output_param_count =
+				QAT_ASYM_MODINV_NUM_OUT_PARAMS;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv base",
+				cookie->input_array[0],
+				alg_size_in_bytes);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "ModInv modulus",
+				cookie->input_array[1],
+				alg_size_in_bytes);
+#endif
+	} else if (xform->xform_type == RTE_CRYPTO_ASYM_XFORM_RSA) {
+		err = qat_asym_check_nonzero(xform->rsa.n);
+		if (err) {
+			QAT_LOG(ERR, "Empty modulus in RSA,"
+					" aborting this operation");
+			return err;
+		}
+
+		alg_size_in_bytes = xform->rsa.n.length;
+		alg_size = alg_size_in_bytes << 3;
+
+		qat_req->input_param_count =
+				QAT_ASYM_RSA_NUM_IN_PARAMS;
+		qat_req->output_param_count =
+				QAT_ASYM_RSA_NUM_OUT_PARAMS;
+
+		if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT ||
+				asym_op->rsa.op_type ==
+						RTE_CRYPTO_ASYM_OP_VERIFY) {
+
+			if (qat_asym_get_sz_and_func_id(RSA_ENC_IDS,
+					sizeof(RSA_ENC_IDS)/
+					sizeof(*RSA_ENC_IDS),
+					&alg_size, &func_id)) {
+				err = -(EINVAL);
+				QAT_LOG(ERR,
+					"Unsupported RSA parameter size (key)");
+				return err;
+			}
+			alg_size_in_bytes = alg_size >> 3;
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0] +
+						alg_size_in_bytes -
+						asym_op->rsa.message.length
+						, asym_op->rsa.message.data,
+						asym_op->rsa.message.length);
+					break;
+				default:
+					err = -(EINVAL);
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Encryption)");
+					return err;
+				}
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Message",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+#endif
+			} else {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0],
+						asym_op->rsa.sign.data,
+						alg_size_in_bytes);
+					break;
+				default:
+					err = -(EINVAL);
+					QAT_LOG(ERR,
+						"Invalid RSA padding (Verify)");
+					return err;
+				}
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+				QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Signature",
+						cookie->input_array[0],
+						alg_size_in_bytes);
+#endif
+
+			}
+			rte_memcpy(cookie->input_array[1] +
+					alg_size_in_bytes -
+					xform->rsa.e.length
+					, xform->rsa.e.data,
+					xform->rsa.e.length);
+			rte_memcpy(cookie->input_array[2] +
+					alg_size_in_bytes -
+					xform->rsa.n.length,
+					xform->rsa.n.data,
+					xform->rsa.n.length);
+
+			cookie->alg_size = alg_size;
+			qat_req->pke_hdr.cd_pars.func_id = func_id;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Public Key",
+					cookie->input_array[1],
+					alg_size_in_bytes);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "RSA Modulus",
+					cookie->input_array[2],
+					alg_size_in_bytes);
+#endif
+		} else {
+			if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_DECRYPT) {
+				switch (asym_op->rsa.pad) {
+				case RTE_CRYPTO_RSA_PADDING_NONE:
+					rte_memcpy(cookie->input_array[0] +
+						alg_size_in_bytes -
+						asym_op->rsa.cipher.length,
+						asym_op->rsa.cipher.data,
+						asym_op->rsa.cipher.length);
+					break;
+				default:
+					QAT_LOG(ERR,
+						"Invalid padding of RSA (Decrypt)");
+					return -(EINVAL);
+				}
+
+			} else if (asym_op->rsa.op_type ==
+					RTE_CRYPTO_ASYM_OP_SIGN) {
+				switch (asym_op->rsa.pad) {
+				case 
RTE_CRYPTO_RSA_PADDING_NONE: + rte_memcpy(cookie->input_array[0] + + alg_size_in_bytes - + asym_op->rsa.message.length, + asym_op->rsa.message.data, + asym_op->rsa.message.length); + break; + default: + QAT_LOG(ERR, + "Invalid padding of RSA (Signature)"); + return -(EINVAL); + } + } + if (xform->rsa.key_type == RTE_RSA_KET_TYPE_QT) { + + qat_req->input_param_count = + QAT_ASYM_RSA_QT_NUM_IN_PARAMS; + if (qat_asym_get_sz_and_func_id(RSA_DEC_CRT_IDS, + sizeof(RSA_DEC_CRT_IDS)/ + sizeof(*RSA_DEC_CRT_IDS), + &alg_size, &func_id)) { + return -(EINVAL); + } + alg_size_in_bytes = alg_size >> 3; + + rte_memcpy(cookie->input_array[1] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.p.length + , xform->rsa.qt.p.data, + xform->rsa.qt.p.length); + rte_memcpy(cookie->input_array[2] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.q.length + , xform->rsa.qt.q.data, + xform->rsa.qt.q.length); + rte_memcpy(cookie->input_array[3] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.dP.length + , xform->rsa.qt.dP.data, + xform->rsa.qt.dP.length); + rte_memcpy(cookie->input_array[4] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.dQ.length + , xform->rsa.qt.dQ.data, + xform->rsa.qt.dQ.length); + rte_memcpy(cookie->input_array[5] + + (alg_size_in_bytes >> 1) - + xform->rsa.qt.qInv.length + , xform->rsa.qt.qInv.data, + xform->rsa.qt.qInv.length); + cookie->alg_size = alg_size; + qat_req->pke_hdr.cd_pars.func_id = func_id; + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "C", + cookie->input_array[0], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "p", + cookie->input_array[1], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "q", + cookie->input_array[2], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, + "dP", cookie->input_array[3], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, + "dQ", cookie->input_array[4], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, + "qInv", cookie->input_array[5], + alg_size_in_bytes); +#endif + } else if (xform->rsa.key_type == + RTE_RSA_KEY_TYPE_EXP) { + if (qat_asym_get_sz_and_func_id( + RSA_DEC_IDS, + sizeof(RSA_DEC_IDS)/ + sizeof(*RSA_DEC_IDS), + &alg_size, &func_id)) { + return -(EINVAL); + } + alg_size_in_bytes = alg_size >> 3; + rte_memcpy(cookie->input_array[1] + + alg_size_in_bytes - + xform->rsa.d.length, + xform->rsa.d.data, + xform->rsa.d.length); + rte_memcpy(cookie->input_array[2] + + alg_size_in_bytes - + xform->rsa.n.length, + xform->rsa.n.data, + xform->rsa.n.length); +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA ciphertext", + cookie->input_array[0], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA d", + cookie->input_array[1], + alg_size_in_bytes); + QAT_DP_HEXDUMP_LOG(DEBUG, "RSA n", + cookie->input_array[2], + alg_size_in_bytes); +#endif + + cookie->alg_size = alg_size; + qat_req->pke_hdr.cd_pars.func_id = func_id; + } else { + QAT_LOG(ERR, "Invalid RSA key type"); + return -(EINVAL); + } + } + } else { + QAT_LOG(ERR, "Invalid asymmetric crypto xform"); + return -(EINVAL); + } + return 0; +} + +static __rte_always_inline int +qat_asym_build_request(void *in_op, uint8_t *out_msg, void *op_cookie, + __rte_unused uint64_t *opaque, + __rte_unused enum qat_device_gen dev_gen) +{ + struct qat_asym_session *ctx; + struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; + struct rte_crypto_asym_op *asym_op = op->asym; + struct icp_qat_fw_pke_request *qat_req = + (struct icp_qat_fw_pke_request *)out_msg; + struct qat_asym_op_cookie *cookie = + (struct qat_asym_op_cookie *)op_cookie; + int err = 0; + + op->status = 
RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		ctx = (struct qat_asym_session *)
+				get_asym_session_private_data(
+				op->asym->session, qat_asym_driver_id);
+		if (unlikely(ctx == NULL)) {
+			QAT_LOG(ERR, "Session has not been created for this device");
+			goto error;
+		}
+		rte_mov64((uint8_t *)qat_req,
+			(const uint8_t *)&(ctx->req_tmpl));
+		err = qat_asym_fill_arrays(asym_op, qat_req,
+				cookie, ctx->xform);
+		if (err) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			goto error;
+		}
+	} else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		qat_fill_req_tmpl(qat_req);
+		err = qat_asym_fill_arrays(asym_op, qat_req, cookie,
+				op->asym->xform);
+		if (err) {
+			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			goto error;
+		}
+	} else {
+		QAT_DP_LOG(ERR, "Invalid session/xform settings");
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+		goto error;
+	}
+
+	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+	qat_req->pke_mid.src_data_addr = cookie->input_addr;
+	qat_req->pke_mid.dest_data_addr = cookie->output_addr;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_pke_request));
+#endif
+
+	return 0;
+error:
+
+	qat_req->pke_mid.opaque = (uint64_t)(uintptr_t)op;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_pke_request));
+#endif
+
+	qat_req->output_param_count = 0;
+	qat_req->input_param_count = 0;
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+	cookie->error |= err;
+
+	return 0;
+}
+
+static uint16_t
+qat_asym_crypto_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_enqueue_op_burst(qp, qat_asym_build_request, (void **)ops,
+			nb_ops);
+}
+
+static uint16_t
+qat_asym_crypto_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	return qat_dequeue_op_burst(qp, (void **)ops, qat_asym_process_response,
+			nb_ops);
+}
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev,
+		struct qat_dev_cmd_param *qat_dev_cmd_param)
+{
+	struct qat_cryptodev_private *internals;
+	struct rte_cryptodev *cryptodev;
+	struct qat_device_info *qat_dev_instance =
+			&qat_pci_devs[qat_pci_dev->qat_dev_id];
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = qat_dev_instance->pci_dev->device.numa_node,
+		.private_data_size = sizeof(struct qat_cryptodev_private)
+	};
+	struct qat_capabilities_info capa_info;
+	const struct rte_cryptodev_capabilities *capabilities;
+	const struct qat_crypto_gen_dev_ops *gen_dev_ops =
+		&qat_asym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	uint64_t capa_size;
+	int i = 0;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "asym");
+	QAT_LOG(DEBUG, "Creating QAT ASYM device %s", name);
+
+	if (gen_dev_ops->cryptodev_ops == NULL) {
+		QAT_LOG(ERR, "Device %s does not support asymmetric crypto",
+				name);
+		return -(EFAULT);
+	}
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		qat_pci_dev->qat_asym_driver_id =
+				qat_asym_driver_id;
+	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+		if (qat_pci_dev->qat_asym_driver_id !=
+				qat_asym_driver_id) {
+			QAT_LOG(ERR,
+				"Device %s has a different driver id than the corresponding device in the primary process",
+				name);
+			return -(EFAULT);
+		}
+	}
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_dev_instance->asym_rte_dev.driver = &cryptodev_qat_asym_driver;
+	qat_dev_instance->asym_rte_dev.numa_node =
+			qat_dev_instance->pci_dev->device.numa_node;
+	qat_dev_instance->asym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_dev_instance->asym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_dev_instance->asym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = qat_asym_driver_id;
+	cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
+
+	cryptodev->enqueue_burst = qat_asym_crypto_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_asym_crypto_dequeue_op_burst;
+
+	cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	snprintf(capa_memz_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"QAT_ASYM_CAPA_GEN_%d",
+			qat_pci_dev->qat_dev_gen);
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	internals->dev_id = cryptodev->data->dev_id;
+
+	capa_info = gen_dev_ops->get_capabilities(qat_pci_dev);
+	capabilities = capa_info.data;
+	capa_size = capa_info.size;
+
+	internals->capa_mz = rte_memzone_lookup(capa_memz_name);
+	if (internals->capa_mz == NULL) {
+		internals->capa_mz = rte_memzone_reserve(capa_memz_name,
+				capa_size, rte_socket_id(), 0);
+		if (internals->capa_mz == NULL) {
+			QAT_LOG(DEBUG,
+				"Error allocating memzone for capabilities, "
+				"destroying PMD for %s",
+				name);
+			rte_cryptodev_pmd_destroy(cryptodev);
+			memset(&qat_dev_instance->asym_rte_dev, 0,
+				sizeof(qat_dev_instance->asym_rte_dev));
+			return -EFAULT;
+		}
+	}
+
+	memcpy(internals->capa_mz->addr, capabilities, capa_size);
+	internals->qat_dev_capabilities = internals->capa_mz->addr;
+
+	while (1) {
+		if (qat_dev_cmd_param[i].name == NULL)
+			break;
+		if (!strcmp(qat_dev_cmd_param[i].name, ASYM_ENQ_THRESHOLD_NAME))
+			internals->min_enq_burst_threshold =
+					qat_dev_cmd_param[i].val;
+		i++;
+	}
+
+	qat_pci_dev->asym_dev = internals;
+	internals->service_type = QAT_SERVICE_ASYMMETRIC;
+	QAT_LOG(DEBUG, "Created QAT ASYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->dev_id);
+	return 0;
+}
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->asym_dev == NULL)
+		return 0;
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_memzone_free(qat_pci_dev->asym_dev->capa_mz);
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(
+			qat_pci_dev->asym_dev->dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_devs[qat_pci_dev->qat_dev_id].asym_rte_dev.name = NULL;
+	qat_pci_dev->asym_dev = NULL;
+
+	return 0;
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_asym_driver,
+		qat_asym_driver_id);
diff --git a/drivers/crypto/qat/qat_asym_refactor.h b/drivers/crypto/qat/qat_asym_refactor.h
new file mode 100644
index 0000000000..9ecabdbe8f
--- /dev/null
+++ b/drivers/crypto/qat/qat_asym_refactor.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#ifndef _QAT_ASYM_H_
+#define _QAT_ASYM_H_
+
+#include <cryptodev_pmd.h>
+#include <rte_crypto_asym.h>
+#include "icp_qat_fw_pke.h"
+#include "qat_device.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+/** Intel(R) QAT Asymmetric Crypto PMD driver name */
+#define CRYPTODEV_NAME_QAT_ASYM_PMD	crypto_qat_asym
+
+typedef uint64_t large_int_ptr;
+#define MAX_PKE_PARAMS	8
+#define QAT_PKE_MAX_LN_SIZE 512
+#define _PKE_ALIGN_ __rte_aligned(8)
+
+#define QAT_ASYM_MAX_PARAMS			8
+#define QAT_ASYM_MODINV_NUM_IN_PARAMS		2
+#define QAT_ASYM_MODINV_NUM_OUT_PARAMS		1
+#define QAT_ASYM_MODEXP_NUM_IN_PARAMS		3
+#define QAT_ASYM_MODEXP_NUM_OUT_PARAMS		1
+#define QAT_ASYM_RSA_NUM_IN_PARAMS		3
+#define QAT_ASYM_RSA_NUM_OUT_PARAMS		1
+#define QAT_ASYM_RSA_QT_NUM_IN_PARAMS		6
+
+/**
+ * Helper macro to add an asym capability
+ *
+ **/
+#define QAT_ASYM_CAP(n, o, l, r, i)					\
+	{								\
+		.op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,			\
+		{.asym = {						\
+			.xform_capa = {					\
+				.xform_type = RTE_CRYPTO_ASYM_XFORM_##n,\
+				.op_types = o,				\
+				{					\
+				.modlen = {				\
+				.min = l,				\
+				.max = r,				\
+				.increment = i				\
+				}, }					\
+			}						\
+		},							\
+		}							\
+	}
+
+struct qat_asym_op_cookie {
+	size_t alg_size;
+	uint64_t error;
+	rte_iova_t input_addr;
+	rte_iova_t output_addr;
+	large_int_ptr input_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+	large_int_ptr output_params_ptrs[MAX_PKE_PARAMS] _PKE_ALIGN_;
+	union {
+		uint8_t input_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE];
+		uint8_t input_buffer[MAX_PKE_PARAMS * QAT_PKE_MAX_LN_SIZE];
+	} _PKE_ALIGN_;
+	uint8_t output_array[MAX_PKE_PARAMS][QAT_PKE_MAX_LN_SIZE] _PKE_ALIGN_;
+} _PKE_ALIGN_;
+
+struct qat_asym_session {
+	struct icp_qat_fw_pke_request req_tmpl;
+	struct rte_crypto_asym_xform *xform;
+};
+
+static inline void
+qat_fill_req_tmpl(struct icp_qat_fw_pke_request *qat_req)
+{
+	memset(qat_req, 0, sizeof(*qat_req));
+	qat_req->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+
+	qat_req->pke_hdr.hdr_flags =
+		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD
+		(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+}
+
+static inline void
+qat_asym_build_req_tmpl(void *sess_private_data)
+{
+	struct icp_qat_fw_pke_request *qat_req;
+	struct qat_asym_session *session = sess_private_data;
+
+	qat_req = &session->req_tmpl;
+	qat_fill_req_tmpl(qat_req);
+}
+
+int
+qat_asym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_asym_xform *xform,
+		struct rte_cryptodev_asym_session *sess,
+		struct rte_mempool *mempool);
+
+unsigned int
+qat_asym_session_get_private_size(struct rte_cryptodev *dev);
+
+void
+qat_asym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_asym_session *sess);
+
+/*
+ * Process a PKE response received from the QAT response (outgoing) queue
+ *
+ * @param op			pointer used to return the rte_crypto_op
+ *				referred to by the response message
+ * @param resp			icp_qat_fw_pke_resp message received from
+ *				the firmware response queue
+ * @param op_cookie		cookie pointer holding private metadata
+ * @param dequeue_err_count	pointer to the dequeue error counter
+ *
+ */
+int
+qat_asym_process_response(void __rte_unused * *op, uint8_t *resp,
+		void *op_cookie, __rte_unused uint64_t *dequeue_err_count);
+
+void
+qat_asym_init_op_cookie(void *cookie);
+
+#endif /* _QAT_ASYM_H_ */
-- 
2.17.1