From: Deepak Kumar JAIN <deepak.k.jain@intel.com>
To: dev@dpdk.org
Date: Tue, 23 Feb 2016 14:02:54 +0000
Message-Id: <1456236176-77883-2-git-send-email-deepak.k.jain@intel.com>
X-Mailer: git-send-email 2.1.0
In-Reply-To: <1456236176-77883-1-git-send-email-deepak.k.jain@intel.com>
References: <1454003176-59256-1-git-send-email-deepak.k.jain@intel.com>
 <1456236176-77883-1-git-send-email-deepak.k.jain@intel.com>
Subject: [dpdk-dev] [PATCH v2 1/3] crypto: add cipher/auth only support

Refactored the existing functionality into modular form to support the
cipher/auth only functionalities.

Signed-off-by: Deepak Kumar JAIN <deepak.k.jain@intel.com>
---
 drivers/crypto/qat/qat_adf/qat_algs.h            |  18 +-
 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 210 ++++++++++++++++++++---
 drivers/crypto/qat/qat_crypto.c                  | 137 +++++++++++----
 drivers/crypto/qat/qat_crypto.h                  |  10 ++
 4 files changed, 308 insertions(+), 67 deletions(-)

diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 76c08c0..b73a5d0 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -3,7 +3,7 @@
  * redistributing this file, you may do so under either license.
  *
  * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015-2016 Intel Corporation.
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
  * published by the Free Software Foundation.
@@ -17,7 +17,7 @@
  * qat-linux@intel.com
  *
  * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015-2016 Intel Corporation.
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -104,11 +104,15 @@ struct qat_alg_ablkcipher_cd {
 
 int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
 
-int qat_alg_aead_session_create_content_desc(struct qat_session *cd,
-			uint8_t *enckey, uint32_t enckeylen,
-			uint8_t *authkey, uint32_t authkeylen,
-			uint32_t add_auth_data_length,
-			uint32_t digestsize);
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
+			uint8_t *enckey,
+			uint32_t enckeylen);
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+			uint8_t *authkey,
+			uint32_t authkeylen,
+			uint32_t add_auth_data_length,
+			uint32_t digestsize);
 
 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header);
 
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index ceaffb7..bef444b 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -3,7 +3,7 @@
  * redistributing this file, you may do so under either license.
  *
  * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015-2016 Intel Corporation.
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
  * published by the Free Software Foundation.
@@ -17,7 +17,7 @@
  * qat-linux@intel.com
  *
  * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015-2016 Intel Corporation.
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -359,15 +359,139 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
 					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }
 
-int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
-			uint8_t *cipherkey, uint32_t cipherkeylen,
-			uint8_t *authkey, uint32_t authkeylen,
-			uint32_t add_auth_data_length,
-			uint32_t digestsize)
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
+			uint8_t *cipherkey,
+			uint32_t cipherkeylen)
 {
-	struct qat_alg_cd *content_desc = &cdesc->cd;
-	struct icp_qat_hw_cipher_algo_blk *cipher = &content_desc->cipher;
-	struct icp_qat_hw_auth_algo_blk *hash = &content_desc->hash;
+	struct icp_qat_hw_cipher_algo_blk *cipher;
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+	enum icp_qat_hw_cipher_convert key_convert;
+	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
+	uint16_t cipher_offset = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+		cipher =
+		    (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
+				sizeof(struct icp_qat_hw_auth_algo_blk));
+		cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
+	} else {
+		cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
+		cipher_offset = 0;
+	}
+	/* CD setup */
+	if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	} else {
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_CMP_AUTH_RES);
+	}
+
+	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+		/* CTR Streaming ciphers are a special case. Decrypt = encrypt
+		 * Overriding default values previously set
+		 */
+		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	else
+		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+	/* For Snow3G, set key convert and other bits */
+	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	}
+
+	cipher->aes.cipher_config.val =
+	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+					cdesc->qat_cipher_alg, key_convert,
+					cdesc->qat_dir);
+	memcpy(cipher->aes.key, cipherkey, cipherkeylen);
+
+	proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
+		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+
+	/* Request template setup */
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = cdesc->qat_cmd;
+
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+	/* Configure the common header protocol flags */
+	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
+	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+	cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
+
+	/* Cipher CD config setup */
+	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		cipher_cd_ctrl->cipher_key_sz =
+			(ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
+		cipher_cd_ctrl->cipher_state_sz =
+			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+	} else {
+		cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
+		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+	}
+
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+	} else {
+		PMD_DRV_LOG(ERR, "invalid param, only authenticated "
+				"encryption supported");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+			uint8_t *authkey,
+			uint32_t authkeylen,
+			uint32_t add_auth_data_length,
+			uint32_t digestsize)
+{
+	struct icp_qat_hw_cipher_algo_blk *cipher;
+	struct icp_qat_hw_auth_algo_blk *hash;
 	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -379,31 +503,57 @@ int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
 		((char *)&req_tmpl->serv_specif_rqpars +
 		sizeof(struct icp_qat_fw_la_cipher_req_params));
 	enum icp_qat_hw_cipher_convert key_convert;
-	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
+	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
 	uint16_t state1_size = 0;
 	uint16_t state2_size = 0;
+	uint16_t cipher_offset = 0, hash_offset = 0;
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+		hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
+		cipher =
+		    (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
+				sizeof(struct icp_qat_hw_auth_algo_blk));
+		hash_offset = 0;
+		cipher_offset = ((char *)hash - (char *)cipher);
+	} else {
+		cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
+		hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
+				sizeof(struct icp_qat_hw_cipher_algo_blk));
+		cipher_offset = 0;
+		hash_offset = ((char *)hash - (char *)cipher);
+	}
+
 	/* CD setup */
 	if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
 		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_LA_RET_AUTH_RES);
+					ICP_QAT_FW_LA_RET_AUTH_RES);
 		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
 	} else {
-		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
 		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
 		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-					   ICP_QAT_FW_LA_CMP_AUTH_RES);
+					ICP_QAT_FW_LA_CMP_AUTH_RES);
 	}
 
-	cipher->aes.cipher_config.val = ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-			cdesc->qat_mode, cdesc->qat_cipher_alg, key_convert,
-			cdesc->qat_dir);
-	memcpy(cipher->aes.key, cipherkey, cipherkeylen);
+	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+		/* CTR Streaming ciphers are a special case. Decrypt = encrypt
+		 * Overriding default values previously set
+		 */
+		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	else
+		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+	cipher->aes.cipher_config.val =
+	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+					cdesc->qat_cipher_alg, key_convert,
+					cdesc->qat_dir);
+	memcpy(cipher->aes.key, authkey, authkeylen);
 
 	hash->sha.inner_setup.auth_config.reserved = 0;
 	hash->sha.inner_setup.auth_config.config =
@@ -423,7 +573,7 @@ int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
 	} else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
 		(cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
 		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
-			cipherkey, cipherkeylen, (uint8_t *)(hash->sha.state1 +
+			authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
 			ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
 			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
 			return -EFAULT;
@@ -454,15 +604,15 @@
 	/* Configure the common header protocol flags */
 	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
 	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
-	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+	cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
 
 	/* Cipher CD config setup */
-	cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
+	cipher_cd_ctrl->cipher_key_sz = authkeylen >> 3;
 	cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
 
 	/* Auth CD config setup */
-	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
 	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
 	hash_cd_ctrl->inner_res_sz = digestsize;
 	hash_cd_ctrl->final_sz = digestsize;
@@ -505,8 +655,12 @@ int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
 			>> 3);
 	auth_param->auth_res_sz = digestsize;
 
-
-	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
 		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
 					ICP_QAT_FW_SLICE_CIPHER);
 		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 69162b1..9fe48cb 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -90,16 +90,16 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
 
 static int qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
 {
-	if (xform->next == NULL)
-		return -1;
-
 	/* Cipher Only */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
-		return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
+		return ICP_QAT_FW_LA_CMD_CIPHER;
 
 	/* Authentication Only */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
-		return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
+		return ICP_QAT_FW_LA_CMD_AUTH;
+
+	if (xform->next == NULL)
+		return -1;
 
 	/* Cipher then Authenticate */
 	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
@@ -139,31 +139,16 @@ qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
 	return NULL;
 }
 
-
-
 void *
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
 {
 	struct qat_pmd_private *internals = dev->data->dev_private;
 	struct qat_session *session = session_private;
-	struct rte_crypto_auth_xform *auth_xform = NULL;
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
-	int qat_cmd_id;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Get requested QAT command id */
-	qat_cmd_id = qat_get_cmd_id(xform);
-	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
-		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
-		goto error_out;
-	}
-	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
-
 	/* Get cipher xform from crypto xform chain */
 	cipher_xform = qat_get_cipher_xform(xform);
@@ -205,8 +190,87 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 	else
 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
 
+	if (qat_alg_aead_session_create_content_desc_cipher(session,
+						cipher_xform->key.data,
+						cipher_xform->key.length))
+		goto error_out;
+
+	return session;
+
+error_out:
+	rte_mempool_put(internals->sess_mp, session);
+	return NULL;
+}
+
+
+void *
+qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform, void *session_private)
+{
+	struct qat_pmd_private *internals = dev->data->dev_private;
+
+	struct qat_session *session = session_private;
+
+	int qat_cmd_id;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Get requested QAT command id */
+	qat_cmd_id = qat_get_cmd_id(xform);
+	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
+		goto error_out;
+	}
+	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+	switch (session->qat_cmd) {
+	case ICP_QAT_FW_LA_CMD_CIPHER:
+		session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+		break;
+	case ICP_QAT_FW_LA_CMD_AUTH:
+		session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+		break;
+	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+		session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+		session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+		break;
+	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+		session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+		session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+		break;
+	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_MGF1:
+	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+	case ICP_QAT_FW_LA_CMD_DELIMITER:
+		PMD_DRV_LOG(ERR, "Unsupported Service %u",
+				session->qat_cmd);
+		goto error_out;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported Service %u",
+				session->qat_cmd);
+		goto error_out;
+	}
+	return session;
+
+error_out:
+	rte_mempool_put(internals->sess_mp, session);
+	return NULL;
+}
+
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+				struct rte_crypto_sym_xform *xform,
+				struct qat_session *session_private)
+{
-	/* Get authentication xform from Crypto xform chain */
+	struct qat_pmd_private *internals = dev->data->dev_private;
+	struct qat_session *session = session_private;
+	struct rte_crypto_auth_xform *auth_xform = NULL;
+	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 
 	auth_xform = qat_get_auth_xform(xform);
 
 	switch (auth_xform->algo) {
@@ -250,17 +314,26 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 				auth_xform->algo);
 		goto error_out;
 	}
+	cipher_xform = qat_get_cipher_xform(xform);
 
-	if (qat_alg_aead_session_create_content_desc(session,
-		cipher_xform->key.data,
-		cipher_xform->key.length,
-		auth_xform->key.data,
-		auth_xform->key.length,
-		auth_xform->add_auth_data_length,
-		auth_xform->digest_length))
-		goto error_out;
-
-	return (struct rte_crypto_sym_session *)session;
+	if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+			(session->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+		if (qat_alg_aead_session_create_content_desc_auth(session,
+				cipher_xform->key.data,
+				cipher_xform->key.length,
+				auth_xform->add_auth_data_length,
+				auth_xform->digest_length))
+			goto error_out;
+	} else {
+		if (qat_alg_aead_session_create_content_desc_auth(session,
+				auth_xform->key.data,
+				auth_xform->key.length,
+				auth_xform->add_auth_data_length,
+				auth_xform->digest_length))
+			goto error_out;
+	}
+	return session;
 
 error_out:
 	rte_mempool_put(internals->sess_mp, session);
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 9323383..0afe74e 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -111,6 +111,16 @@
 extern void *
 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private);
 
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+				struct rte_crypto_sym_xform *xform,
+				struct qat_session *session_private);
+
+void *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform, void *session_private);
+
+
 extern void
 qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-- 
2.1.0
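
For reference, the cipher-only and auth-only paths enabled by this patch are selected purely by the
shape of the rte_crypto_sym_xform chain handed to session configuration: a single xform whose next
pointer is NULL now maps to ICP_QAT_FW_LA_CMD_CIPHER or ICP_QAT_FW_LA_CMD_AUTH in qat_get_cmd_id().
A minimal sketch of such chains follows; it is not part of the patch, and the helper names, the
algorithm choices, and the key/digest lengths are illustrative assumptions only.

#include <stdint.h>
#include <string.h>
#include <rte_crypto.h>

/* Cipher-only chain: one xform, next == NULL
 * -> qat_get_cmd_id() returns ICP_QAT_FW_LA_CMD_CIPHER.
 */
static void
build_cipher_only_xform(struct rte_crypto_sym_xform *x,
		uint8_t *key, uint32_t keylen)
{
	memset(x, 0, sizeof(*x));
	x->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	x->next = NULL;
	x->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	x->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;	/* illustrative */
	x->cipher.key.data = key;
	x->cipher.key.length = keylen;
}

/* Auth-only chain: one xform, next == NULL
 * -> qat_get_cmd_id() returns ICP_QAT_FW_LA_CMD_AUTH.
 */
static void
build_auth_only_xform(struct rte_crypto_sym_xform *x,
		uint8_t *key, uint32_t keylen)
{
	memset(x, 0, sizeof(*x));
	x->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	x->next = NULL;
	x->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	x->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;	/* illustrative */
	x->auth.key.data = key;
	x->auth.key.length = keylen;
	x->auth.digest_length = 20;	/* SHA-1 digest size, illustrative */
}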