From: Kai Ji <kai.ji@intel.com>
To: dev@dpdk.org
Cc: Kai Ji <kai.ji@intel.com>
Subject: [dpdk-dev v6 04/10] crypto/qat: rework session APIs
Date: Sat, 5 Feb 2022 02:50:51 +0800
Message-Id: <20220204185057.29893-5-kai.ji@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20220204185057.29893-1-kai.ji@intel.com>
References: <20220128182314.23471-1-kai.ji@intel.com>
 <20220204185057.29893-1-kai.ji@intel.com>

This patch introduces set_session methods for the different
generations of QAT. In addition, it replaces the session field
'min_qat_dev_gen' with checks against the device's 'qat_dev_gen'.
As a result, a session created on one generation of QAT device can
no longer be used by another generation.
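For context, the reworked dispatch can be pictured as below. This is an
illustrative sketch based on the structures this patch adds to
qat_crypto.h; the caller function is hypothetical, shown only to make
the dispatch visible:

    /* Each QAT generation registers its own session-setup callback. */
    typedef int (*set_session_t)(void *cryptodev, void *session);

    struct qat_crypto_gen_dev_ops {
            struct rte_cryptodev_ops *cryptodev_ops;
            set_session_t set_session;      /* added by this patch */
            /* ... get_capabilities, get_feature_flags, ... */
    };

    extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];

    /* Hypothetical caller: session setup dispatches on the device
     * generation once, at create time, instead of checking
     * min_qat_dev_gen on every enqueued request. */
    static int
    example_set_session(enum qat_device_gen gen, void *dev, void *sess)
    {
            return qat_sym_gen_dev_ops[gen].set_session(dev, sess);
    }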
Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_asym_pmd_gen1.c   |   9 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  91 +++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 139 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  91 ++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   3 +
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  64 +++++++++
 drivers/crypto/qat/qat_crypto.h              |   8 +-
 drivers/crypto/qat/qat_sym.c                 |  12 +-
 drivers/crypto/qat/qat_sym_session.c         | 113 +++------------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 10 files changed, 425 insertions(+), 107 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
index 9ed1f21d9d..01a897a21f 100644
--- a/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_asym_pmd_gen1.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include
@@ -65,6 +65,13 @@ qat_asym_crypto_feature_flags_get_gen1(
 	return feature_flags;
 }
 
+int
+qat_asym_crypto_set_session_gen1(void *cdev __rte_unused,
+		void *session __rte_unused)
+{
+	return 0;
+}
+
 RTE_INIT(qat_asym_crypto_gen1_init)
 {
 	qat_asym_gen_dev_ops[QAT_GEN1].cryptodev_ops =
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..64e6ae66ec 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include
@@ -166,6 +166,91 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * but some are not supported by GEN2, so checking here
+		 */
+		if ((qat_private->internal_capabilities &
+				QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+			return -ENOTSUP;
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,6 +289,8 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 
@@ -221,4 +308,6 @@ RTE_INIT(qat_asym_crypto_gen2_init)
 			qat_asym_crypto_cap_get_gen1;
 	qat_asym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_asym_crypto_feature_flags_get_gen1;
+	qat_asym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_asym_crypto_set_session_gen1;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index fca7af2b7e..db864d973a 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -258,6 +258,142 @@ enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
 			ICP_QAT_FW_LA_NO_PROTO);
 }
 
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+		ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN3
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -265,6 +401,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -276,4 +414,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 8462c0b9b1..7642a87d55 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -135,11 +135,101 @@ enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
 		enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs,
 				data_len);
 }
 
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad,
+		ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num,
+			&cipher_iv, NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+
+	if (ret == -ENOTSUP) {
+		/* GEN1 returning -ENOTSUP as it cannot handle some mixed algo,
+		 * this is addressed by GEN4
+		 */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+		} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+				ctx->qat_cipher_alg !=
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx,
+				1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+		} else if ((ctx->aes_cmac ||
+				ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+				(ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+				ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+			qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+		}
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -153,4 +243,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
index 1130e0e76f..96cdb97a26 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
@@ -856,6 +856,9 @@ qat_asym_crypto_cap_get_gen1(struct qat_pci_device *qat_dev);
 uint64_t
 qat_asym_crypto_feature_flags_get_gen1(struct qat_pci_device *qat_dev);
 
+int
+qat_asym_crypto_set_session_gen1(void *cryptodev, void *session);
+
 #ifdef RTE_LIB_SECURITY
 extern struct rte_security_ops security_qat_ops_gen1;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index c429825a67..501132a448 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -452,12 +452,76 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 #endif
 
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (build_request)
+		ctx->build_request[proc_type] = build_request;
+	else
+		return -EINVAL;
+
+	/* no more work if not mixed op */
+	if (!handle_mixed)
+		return 0;
+
+	/* Check none supported algs if mixed */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..5ca76fcaa6 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2022 Intel Corporation
  */
 
 #ifndef _QAT_CRYPTO_H_
@@ -48,15 +48,21 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+extern struct qat_crypto_gen_dev_ops qat_asym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index f814bf8f75..83bf55c933 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -13,6 +13,10 @@
 #include "qat_sym.h"
 #include "dev/qat_crypto_pmd_gens.h"
 
+uint8_t qat_sym_driver_id;
+
+struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_N_GENS];
+
 static inline void
 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
 		struct icp_qat_fw_la_cipher_req_params *cipher_param,
@@ -126,7 +130,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -191,12 +195,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..3a880096c4 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
- * Copyright(c) 2015-2019 Intel Corporation
+ * Copyright(c) 2015-2022 Intel Corporation
  */
 
 #include	/* Needed to calculate pre-compute values */
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,14 +598,13 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
-	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
+	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM)
 		session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
-	} else {
+	else
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
-	}
+
 	session->cipher_iv.offset = aead_xform->iv.offset;
 	session->cipher_iv.length = aead_xform->iv.length;
 	session->aad_len = aead_xform->aad_length;
@@ -1205,9 +1124,9 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in, uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1573,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1920,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2180,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2204,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2230,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index fe875a7fd0..01908abd9e 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -105,7 +105,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1
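All three newer generations wrap the GEN1 handler with the same
fallback pattern: delegate first, then absorb -ENOTSUP for the mixed
hash+cipher combinations that generation can service. A minimal
compilable sketch of the pattern follows; the stub names here are
illustrative stand-ins, not the real DPDK symbols:

    #include <errno.h>

    /* Stand-in for qat_sym_crypto_set_session_gen1(): selects a
     * build_request routine and reports mixed hash+cipher algorithm
     * combinations it cannot service. */
    static int gen1_set_session(void *dev, void *sess)
    {
            (void)dev; (void)sess;
            return -ENOTSUP;    /* e.g. ZUC cipher with SNOW 3G hash */
    }

    /* Stand-in for qat_sym_session_set_ext_hash_flags_gen2(). */
    static void set_ext_hash_flags(void *sess, unsigned int hash_flag)
    {
            (void)sess; (void)hash_flag;
    }

    /* GEN2/GEN3/GEN4 pattern: delegate to GEN1 first, then patch the
     * session with extended hash flags and clear the error for the
     * mixed algorithms this generation supports. */
    static int genN_set_session(void *dev, void *sess)
    {
            int ret = gen1_set_session(dev, sess);

            if (ret == -ENOTSUP) {
                    set_ext_hash_flags(sess, 0);
                    ret = 0;
            }
            return ret;
    }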