From mboxrd@z Thu Jan  1 00:00:00 1970
From: Kai Ji <kai.ji@intel.com>
To: dev@dpdk.org
Cc: Kai Ji <kai.ji@intel.com>
Subject: [dpdk-dev v5 03/10] crypto/qat: qat generation specific enqueue
Date: Fri, 28 Jan 2022 18:23:07 +0000
Message-Id: <20220128182314.23471-4-kai.ji@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20220128182314.23471-1-kai.ji@intel.com>
References: <20211105001932.28784-12-kai.ji@intel.com>
 <20220128182314.23471-1-kai.ji@intel.com>

This patch adds the generation-specific AEAD and auth build-op enqueue
functions for QAT GEN3 and GEN4.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 115 ++++++++++++++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  34 +++++-
 2 files changed, 147 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..fca7af2b7e 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -143,6 +143,119 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as
+		 * cipher operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *)&req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..8462c0b9b1 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017-2021 Intel Corporation
+ * Copyright(c) 2017-2022 Intel Corporation
  */
 
 #include <rte_cryptodev.h>
@@ -103,6 +103,38 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
-- 
2.17.1
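
A note on the request setup in enqueue_one_auth_job_gen3() above:
content_desc_params_sz is expressed in 8-byte words, which is why the
config-plus-key size is rounded up with RTE_ALIGN_CEIL before the >> 3.
Below is a standalone sketch of that arithmetic; the 16-byte config and
32-byte key are illustrative assumptions for the example, not the real
sizeof values, and ALIGN_CEIL is a local stand-in rather than the macro
from rte_common.h.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for DPDK's RTE_ALIGN_CEIL, valid for power-of-two a. */
#define ALIGN_CEIL(v, a) (((v) + (a) - 1) & ~((uint32_t)(a) - 1))

int main(void)
{
	uint32_t cipher_config_sz = 16;	/* assumed config block size */
	uint32_t auth_key_length = 32;	/* e.g. an AES-256 GMAC key */

	/* Mirrors: RTE_ALIGN_CEIL(sizeof(cfg) + key_len, 8) >> 3 */
	uint32_t params_sz = ALIGN_CEIL(cipher_config_sz + auth_key_length,
			8) >> 3;

	/* 16 + 32 = 48 bytes, already 8-aligned, so 48 >> 3 = 6 words. */
	printf("content_desc_params_sz = %u (8-byte words)\n", params_sz);
	return 0;
}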
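
One detail of enqueue_one_aead_job_gen4() worth spelling out: when the
session is single-pass and uses the unified crypto slice (UCS), the
function overlays two request-parameter layouts on the same
serv_specif_rqpars bytes, writing the IV and offsets through the legacy
struct and the SPC AAD/digest addresses through the 2.0 struct. A
minimal standalone sketch of that overlay pattern follows, using mock
layouts; the real icp_qat_fw_la_cipher*_req_params structs differ.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins only, not the real firmware layouts. */
struct legacy_view {
	uint32_t cipher_offset;
	uint32_t cipher_length;
};

struct ucs_view {
	uint32_t cipher_offset;
	uint32_t cipher_length;
	uint64_t spc_aad_addr;
	uint64_t spc_auth_res_addr;
};

int main(void)
{
	/* Models req->serv_specif_rqpars: one backing region, two views,
	 * aliased with (void *) casts just as the driver does. */
	uint64_t backing[8] = {0};
	struct legacy_view *lp = (void *)backing;
	struct ucs_view *up = (void *)backing;

	lp->cipher_offset = 16;		/* written via the legacy view */
	up->spc_auth_res_addr = 0x1000;	/* written via the 2.0 view */

	printf("offset=%" PRIu32 " digest iova=0x%" PRIx64 "\n",
			lp->cipher_offset, up->spc_auth_res_addr);
	return 0;
}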