From: Kai Ji
To: dev@dpdk.org
Cc: gakhil@marvell.com, Kai Ji
Date: Fri, 5 Nov 2021 00:19:24 +0000
Message-Id: <20211105001932.28784-4-kai.ji@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20211105001932.28784-1-kai.ji@intel.com>
References: <20211102134913.21148-1-kai.ji@intel.com> <20211105001932.28784-1-kai.ji@intel.com>
Subject: [dpdk-dev] [dpdk-dev v4 03/11] crypto/qat: rework session APIs

This patch introduces a set_session method in the QAT generation-specific
device ops. It also replaces min_qat_dev_gen with dev_id: a session becomes
invalid if the device generation ID does not match during session init or
during crypto operations. (A brief illustrative sketch of the resulting
set_session dispatch flow is appended after the patch.)

Signed-off-by: Kai Ji
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  87 ++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 253 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 121 +++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  62 +++++
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |   5 +
 drivers/crypto/qat/qat_sym.c                 |   8 +-
 drivers/crypto/qat/qat_sym_session.c         | 106 +-------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 9 files changed, 544 insertions(+), 101 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..621ff85638 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+		(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+		session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+
ICP_QAT_FW_LA_SNOW_3G_PROTO); + ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( + header->serv_specif_flags, 0); + break; + case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); + ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( + header->serv_specif_flags, + ICP_QAT_FW_LA_ZUC_3G_PROTO); + break; + default: + ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); + ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( + header->serv_specif_flags, 0); + break; + } +} + +static int +qat_sym_crypto_set_session_gen2(void *cdev, void *session) +{ + struct rte_cryptodev *dev = cdev; + struct qat_sym_session *ctx = session; + const struct qat_cryptodev_private *qat_private = + dev->data->dev_private; + int ret; + + ret = qat_sym_crypto_set_session_gen1(cdev, session); + if (ret == 0 || ret != -ENOTSUP) + return ret; + + /* GEN1 returning -ENOTSUP as it cannot handle some mixed algo, + * but some are not supported by GEN2, so checking here + */ + if ((qat_private->internal_capabilities & + QAT_SYM_CAP_MIXED_CRYPTO) == 0) + return -ENOTSUP; + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS); + } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS); + } else if ((ctx->aes_cmac || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && + (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, 0); + } + + return 0; +} + struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = { /* Device related operations */ @@ -204,9 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init) qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2; qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities = qat_sym_crypto_cap_get_gen2; + qat_sym_gen_dev_ops[QAT_GEN2].set_session = + qat_sym_crypto_set_session_gen2; qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1; - #ifdef RTE_LIB_SECURITY qat_sym_gen_dev_ops[QAT_GEN2].create_security_ctx = qat_sym_create_security_gen1; diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c index d3336cf4a1..6baf1810ed 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c @@ -143,6 +143,256 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused) return capa_info; } +static __rte_always_inline void +enqueue_one_aead_job_gen3(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + if (ctx->is_single_pass) { + struct icp_qat_fw_la_cipher_req_params *cipher_param = + (void *)&req->serv_specif_rqpars; + + /* QAT GEN3 uses single pass to treat AEAD as + * cipher operation + */ + cipher_param = (void *)&req->serv_specif_rqpars; + + qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req); + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = data_len - ofs.ofs.cipher.head - + ofs.ofs.cipher.tail; + 
+ cipher_param->spc_aad_addr = aad->iova; + cipher_param->spc_auth_res_addr = digest->iova; + + return; + } + + enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len); +} + +static __rte_always_inline void +enqueue_one_auth_job_gen3(struct qat_sym_session *ctx, + struct qat_sym_op_cookie *cookie, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *auth_iv, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl; + struct icp_qat_fw_la_cipher_req_params *cipher_param; + uint32_t ver_key_offset; + uint32_t auth_data_len = data_len - ofs.ofs.auth.head - + ofs.ofs.auth.tail; + + if (!ctx->is_single_pass_gmac || + (auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) { + enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs, + data_len); + return; + } + + cipher_cd_ctrl = (void *) &req->cd_ctrl; + cipher_param = (void *)&req->serv_specif_rqpars; + ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) + + ICP_QAT_HW_GALOIS_128_STATE1_SZ + + ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ + + ICP_QAT_HW_GALOIS_E_CTR0_SZ + + sizeof(struct icp_qat_hw_cipher_config); + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { + /* AES-GMAC */ + qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length, + req); + } + + /* Fill separate Content Descriptor for this op */ + rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key, + ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ? + ctx->cd.cipher.key : + RTE_PTR_ADD(&ctx->cd, ver_key_offset), + ctx->auth_key_length); + cookie->opt.spc_gmac.cd_cipher.cipher_config.val = + ICP_QAT_HW_CIPHER_CONFIG_BUILD( + ICP_QAT_HW_CIPHER_AEAD_MODE, + ctx->qat_cipher_alg, + ICP_QAT_HW_CIPHER_NO_CONVERT, + (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ? 
+ ICP_QAT_HW_CIPHER_ENCRYPT : + ICP_QAT_HW_CIPHER_DECRYPT)); + QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val, + ctx->digest_length, + QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS, + QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK); + cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved = + ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len); + + /* Update the request */ + req->cd_pars.u.s.content_desc_addr = + cookie->opt.spc_gmac.cd_phys_addr; + req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL( + sizeof(struct icp_qat_hw_cipher_config) + + ctx->auth_key_length, 8) >> 3; + req->comn_mid.src_length = data_len; + req->comn_mid.dst_length = 0; + + cipher_param->spc_aad_addr = 0; + cipher_param->spc_auth_res_addr = digest->iova; + cipher_param->spc_aad_sz = auth_data_len; + cipher_param->reserved = 0; + cipher_param->spc_auth_res_sz = ctx->digest_length; + + req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER; + cipher_cd_ctrl->cipher_cfg_offset = 0; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR); + ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET( + req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_SINGLE_PASS_PROTO); + ICP_QAT_FW_LA_PROTO_SET( + req->comn_hdr.serv_specif_flags, + ICP_QAT_FW_LA_NO_PROTO); +} + +static int +qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr cipher_iv; + struct rte_crypto_va_iova_ptr aad; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl, + &cipher_iv, &aad, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs, + total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv, + NULL, &aad, &digest); +#endif + + return 0; +} + +static int +qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr auth_iv; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl, + NULL, &auth_iv, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) 
{ + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv, + ofs, total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL, + &auth_iv, NULL, &digest); +#endif + + return 0; +} + +static int +qat_sym_crypto_set_session_gen3(void *cdev __rte_unused, void *session) +{ + struct qat_sym_session *ctx = session; + enum rte_proc_type_t proc_type = rte_eal_process_type(); + int ret; + + ret = qat_sym_crypto_set_session_gen1(cdev, session); + /* special single pass build request for GEN3 */ + if (ctx->is_single_pass) + ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3; + else if (ctx->is_single_pass_gmac) + ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3; + + if (ret == 0) + return ret; + + /* GEN1 returning -ENOTSUP as it cannot handle some mixed algo, + * this is addressed by GEN3 + */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS); + } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS); + } else if ((ctx->aes_cmac || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && + (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, 0); + } + + return 0; +} + RTE_INIT(qat_sym_crypto_gen3_init) { qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1; @@ -150,6 +400,8 @@ RTE_INIT(qat_sym_crypto_gen3_init) qat_sym_crypto_cap_get_gen3; qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1; + qat_sym_gen_dev_ops[QAT_GEN3].set_session = + qat_sym_crypto_set_session_gen3; #ifdef RTE_LIB_SECURITY qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx = qat_sym_create_security_gen1; @@ -161,4 +413,5 @@ RTE_INIT(qat_asym_crypto_gen3_init) qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL; qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL; qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL; + qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL; } diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c index 37a58c026f..fa6a7ee2fd 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c @@ -103,11 +103,131 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused) return capa_info; } +static __rte_always_inline void +enqueue_one_aead_job_gen4(struct qat_sym_session *ctx, + struct icp_qat_fw_la_bulk_req *req, + struct rte_crypto_va_iova_ptr *iv, + struct rte_crypto_va_iova_ptr *digest, + struct rte_crypto_va_iova_ptr *aad, + union rte_crypto_sym_ofs ofs, uint32_t data_len) +{ + if (ctx->is_single_pass && ctx->is_ucs) { + struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 = + (void *)&req->serv_specif_rqpars; + struct icp_qat_fw_la_cipher_req_params *cipher_param = + (void 
*)&req->serv_specif_rqpars; + + /* QAT GEN4 uses single pass to treat AEAD as cipher + * operation + */ + qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, + req); + cipher_param->cipher_offset = ofs.ofs.cipher.head; + cipher_param->cipher_length = data_len - + ofs.ofs.cipher.head - ofs.ofs.cipher.tail; + + cipher_param_20->spc_aad_addr = aad->iova; + cipher_param_20->spc_auth_res_addr = digest->iova; + + return; + } + + enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len); +} + +static int +qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx, + uint8_t *out_msg, void *op_cookie) +{ + register struct icp_qat_fw_la_bulk_req *qat_req; + struct rte_crypto_op *op = in_op; + struct qat_sym_op_cookie *cookie = op_cookie; + struct rte_crypto_sgl in_sgl, out_sgl; + struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER], + out_vec[QAT_SYM_SGL_MAX_NUMBER]; + struct rte_crypto_va_iova_ptr cipher_iv; + struct rte_crypto_va_iova_ptr aad; + struct rte_crypto_va_iova_ptr digest; + union rte_crypto_sym_ofs ofs; + int32_t total_len; + + in_sgl.vec = in_vec; + out_sgl.vec = out_vec; + + qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); + + ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl, + &cipher_iv, &aad, &digest); + if (unlikely(ofs.raw == UINT64_MAX)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie, + in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num); + if (unlikely(total_len < 0)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + + enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs, + total_len); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv, + NULL, &aad, &digest); +#endif + + return 0; +} + +static int +qat_sym_crypto_set_session_gen4(void *cdev, void *session) +{ + struct qat_sym_session *ctx = session; + enum rte_proc_type_t proc_type = rte_eal_process_type(); + int ret; + + ret = qat_sym_crypto_set_session_gen1(cdev, session); + /* special single pass build request for GEN4 */ + if (ctx->is_single_pass && ctx->is_ucs) + ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4; + if (ret == 0) + return ret; + + /* GEN1 returning -ENOTSUP as it cannot handle some mixed algo, + * this is addressed by GEN4 + */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS); + } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, + 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS); + } else if ((ctx->aes_cmac || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && + (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { + qat_sym_session_set_ext_hash_flags_gen2(ctx, 0); + } + + return 0; +} + RTE_INIT(qat_sym_crypto_gen4_init) { qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1; qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities = qat_sym_crypto_cap_get_gen4; + qat_sym_gen_dev_ops[QAT_GEN4].set_session = + qat_sym_crypto_set_session_gen4; qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags 
= qat_sym_crypto_feature_flags_get_gen1; #ifdef RTE_LIB_SECURITY @@ -121,4 +241,5 @@ RTE_INIT(qat_asym_crypto_gen4_init) qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL; qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL; qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL; + qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL; } diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c index 5b96e626f8..bcdf0172fe 100644 --- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c +++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c @@ -6,6 +6,7 @@ #ifdef RTE_LIB_SECURITY #include #endif +#include #include "adf_transport_access_macros.h" #include "icp_qat_fw.h" @@ -454,12 +455,73 @@ qat_sym_create_security_gen1(void *cryptodev) } #endif +int +qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session) +{ + struct qat_sym_session *ctx = session; + qat_sym_build_request_t build_request = NULL; + enum rte_proc_type_t proc_type = rte_eal_process_type(); + int handle_mixed = 0; + + if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || + ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) && + !ctx->is_gmac) { + /* AES-GCM or AES-CCM */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || + (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 + && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE + && ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { + /* do_aead = 1; */ + build_request = qat_sym_build_op_aead_gen1; + } else { + /* do_auth = 1; do_cipher = 1; */ + build_request = qat_sym_build_op_chain_gen1; + handle_mixed = 1; + } + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) { + /* do_auth = 1; do_cipher = 0;*/ + build_request = qat_sym_build_op_auth_gen1; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + /* do_auth = 0; do_cipher = 1; */ + build_request = qat_sym_build_op_cipher_gen1; + } + + if (!build_request) + return 0; + ctx->build_request[proc_type] = build_request; + + if (!handle_mixed) + return 0; + + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + return -ENOTSUP; + } else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && + ctx->qat_cipher_alg != + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + return -ENOTSUP; + } else if ((ctx->aes_cmac || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && + (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { + return -ENOTSUP; + } + + return 0; +} RTE_INIT(qat_sym_crypto_gen1_init) { qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1; qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities = qat_sym_crypto_cap_get_gen1; + qat_sym_gen_dev_ops[QAT_GEN1].set_session = + qat_sym_crypto_set_session_gen1; qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags = qat_sym_crypto_feature_flags_get_gen1; #ifdef RTE_LIB_SECURITY diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c index 84c26a8062..ee878e47f1 100644 --- a/drivers/crypto/qat/qat_crypto.c +++ b/drivers/crypto/qat/qat_crypto.c @@ -2,6 +2,7 @@ * Copyright(c) 2021 Intel Corporation */ +#include #include "qat_device.h" #include "qat_qp.h" #include "qat_crypto.h" diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h index 6eaa15b975..a32c716d28 100644 --- a/drivers/crypto/qat/qat_crypto.h +++ 
b/drivers/crypto/qat/qat_crypto.h @@ -48,15 +48,20 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev); typedef void * (*create_security_ctx_t)(void *cryptodev); +typedef int (*set_session_t)(void *cryptodev, void *session); + struct qat_crypto_gen_dev_ops { get_feature_flags_t get_feature_flags; get_capabilities_info_t get_capabilities; struct rte_cryptodev_ops *cryptodev_ops; + set_session_t set_session; #ifdef RTE_LIB_SECURITY create_security_ctx_t create_security_ctx; #endif }; +extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[]; + int qat_cryptodev_config(struct rte_cryptodev *dev, struct rte_cryptodev_config *config); diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c index 93b257522b..7481607ff8 100644 --- a/drivers/crypto/qat/qat_sym.c +++ b/drivers/crypto/qat/qat_sym.c @@ -212,7 +212,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op, int qat_sym_build_request(void *in_op, uint8_t *out_msg, - void *op_cookie, enum qat_device_gen qat_dev_gen) + void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen) { int ret = 0; struct qat_sym_session *ctx = NULL; @@ -277,12 +277,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg, return -EINVAL; } - if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) { - QAT_DP_LOG(ERR, "Session alg not supported on this device gen"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - return -EINVAL; - } - qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c index 8ca475ca8b..52837d7c9c 100644 --- a/drivers/crypto/qat/qat_sym_session.c +++ b/drivers/crypto/qat/qat_sym_session.c @@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev, return 0; } -static void -qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session, - uint8_t hash_flag) -{ - struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr; - struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl = - (struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *) - session->fw_req.cd_ctrl.content_desc_ctrl_lw; - - /* Set the Use Extended Protocol Flags bit in LW 1 */ - QAT_FIELD_SET(header->comn_req_flags, - QAT_COMN_EXT_FLAGS_USED, - QAT_COMN_EXT_FLAGS_BITPOS, - QAT_COMN_EXT_FLAGS_MASK); - - /* Set Hash Flags in LW 28 */ - cd_ctrl->hash_flags |= hash_flag; - - /* Set proto flags in LW 1 */ - switch (session->qat_cipher_alg) { - case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_SNOW_3G_PROTO); - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( - header->serv_specif_flags, 0); - break; - case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_PROTO); - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( - header->serv_specif_flags, - ICP_QAT_FW_LA_ZUC_3G_PROTO); - break; - default: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_PROTO); - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET( - header->serv_specif_flags, 0); - break; - } -} - -static void -qat_sym_session_handle_mixed(const struct rte_cryptodev *dev, - struct qat_sym_session *session) -{ - const struct qat_cryptodev_private *qat_private = - dev->data->dev_private; - enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities & - QAT_SYM_CAP_MIXED_CRYPTO) ? 
QAT_GEN2 : QAT_GEN3; - - if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 && - session->qat_cipher_alg != - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { - session->min_qat_dev_gen = min_dev_gen; - qat_sym_session_set_ext_hash_flags(session, - 1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS); - } else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 && - session->qat_cipher_alg != - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { - session->min_qat_dev_gen = min_dev_gen; - qat_sym_session_set_ext_hash_flags(session, - 1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS); - } else if ((session->aes_cmac || - session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) && - (session->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || - session->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) { - session->min_qat_dev_gen = min_dev_gen; - qat_sym_session_set_ext_hash_flags(session, 0); - } -} - int qat_sym_session_set_parameters(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *session_private) @@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen; int ret; int qat_cmd_id; - int handle_mixed = 0; /* Verify the session physical address is known */ rte_iova_t session_paddr = rte_mempool_virt2iova(session); @@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, session->cd_paddr = session_paddr + offsetof(struct qat_sym_session, cd); - session->min_qat_dev_gen = QAT_GEN1; + session->dev_id = internals->dev_id; session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE; session->is_ucs = 0; @@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, xform, session); if (ret < 0) return ret; - handle_mixed = 1; } break; case ICP_QAT_FW_LA_CMD_HASH_CIPHER: @@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, xform, session); if (ret < 0) return ret; - handle_mixed = 1; } break; case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM: @@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev, return -ENOTSUP; } qat_sym_session_finalize(session); - if (handle_mixed) { - /* Special handling of mixed hash+cipher algorithms */ - qat_sym_session_handle_mixed(dev, session); - } - return 0; + return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev, + (void *)session); } static int @@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session, { session->is_single_pass = 1; session->is_auth = 1; - session->min_qat_dev_gen = QAT_GEN3; session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER; /* Chacha-Poly is special case that use QAT CTR mode */ if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) { @@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out) return 0; } -static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg, - uint8_t *data_in, - uint8_t *data_out) +static int +partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg, + uint8_t *data_in, + uint8_t *data_out) { int digest_size; uint8_t digest[qat_hash_get_digest_size( @@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc, cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC; - cdesc->min_qat_dev_gen = QAT_GEN2; } else { total_key_size = cipherkeylen; cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3; @@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc, 
memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; - cdesc->min_qat_dev_gen = QAT_GEN2; break; case ICP_QAT_HW_AUTH_ALGO_MD5: @@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev, session->cd_paddr = session_paddr + offsetof(struct qat_sym_session, cd); - session->min_qat_dev_gen = QAT_GEN1; - /* Get requested QAT command id - should be cipher */ qat_cmd_id = qat_get_cmd_id(xform); if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) { @@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev, { void *sess_private_data; struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev; + struct qat_cryptodev_private *internals = cdev->data->dev_private; + enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen; + struct qat_sym_session *sym_session = NULL; int ret; if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL || @@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev, } set_sec_session_private_data(sess, sess_private_data); + sym_session = (struct qat_sym_session *)sess_private_data; + sym_session->dev_id = internals->dev_id; - return ret; + return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev, + sess_private_data); } int diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h index 33f977a4e3..581bce3790 100644 --- a/drivers/crypto/qat/qat_sym_session.h +++ b/drivers/crypto/qat/qat_sym_session.h @@ -100,7 +100,7 @@ struct qat_sym_session { uint16_t auth_key_length; uint16_t digest_length; rte_spinlock_t lock; /* protects this struct */ - enum qat_device_gen min_qat_dev_gen; + uint16_t dev_id; uint8_t aes_cmac; uint8_t is_single_pass; uint8_t is_single_pass_gmac; -- 2.17.1
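For readers following the series: the core shape of this change is a
per-generation set_session hook in the gen dev ops table, registered from
each generation's RTE_INIT constructor and invoked at the end of session
parameter setup. Below is a minimal, self-contained sketch of that dispatch
pattern only — not the driver code itself; the demo_* names, simplified
types and the main() harness are hypothetical stand-ins so the example
builds on its own.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver types in the patch. */
enum demo_qat_gen { DEMO_QAT_GEN1, DEMO_QAT_GEN2, DEMO_QAT_GEN_MAX };

typedef int (*set_session_t)(void *cryptodev, void *session);

struct demo_gen_dev_ops {
	set_session_t set_session; /* per-generation hook added by this patch */
};

/* One ops slot per device generation; the driver fills these from
 * RTE_INIT constructors in the per-generation source files.
 */
static struct demo_gen_dev_ops demo_gen_dev_ops[DEMO_QAT_GEN_MAX];

/* Baseline handler: newer generations call it first and only layer
 * generation-specific handling on top.
 */
static int
demo_set_session_gen1(void *cryptodev, void *session)
{
	(void)cryptodev;
	(void)session;
	printf("gen1: select baseline build_request for the session\n");
	return 0; /* or an error for combinations GEN1 cannot handle */
}

static int
demo_set_session_gen2(void *cryptodev, void *session)
{
	int ret = demo_set_session_gen1(cryptodev, session);

	if (ret != 0)
		return ret;
	printf("gen2: additionally handle generation-specific cases\n");
	return 0;
}

/* What session setup does at the end: dispatch to the ops of the
 * device generation the session was created on.
 */
static int
demo_session_set_parameters(enum demo_qat_gen gen, void *dev, void *sess)
{
	return demo_gen_dev_ops[gen].set_session(dev, sess);
}

int
main(void)
{
	demo_gen_dev_ops[DEMO_QAT_GEN1].set_session = demo_set_session_gen1;
	demo_gen_dev_ops[DEMO_QAT_GEN2].set_session = demo_set_session_gen2;

	return demo_session_set_parameters(DEMO_QAT_GEN2, NULL, NULL);
}

In the driver itself the table is qat_sym_gen_dev_ops[] indexed by the
device's qat_dev_gen, and each generation's handler (for example
qat_sym_crypto_set_session_gen2()) first calls the GEN1 handler and only
adds extra handling, such as the mixed hash/cipher extended flags, when the
baseline cannot cover the session.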