From: Deepak Kumar Jain <deepak.k.jain@intel.com>
To: dev@dpdk.org
Cc: pablo.de.lara.guarch@intel.com, Deepak Kumar Jain <deepak.k.jain@intel.com>
Date: Thu, 15 Sep 2016 11:03:35 +0100
Message-Id: <1473933815-185834-3-git-send-email-deepak.k.jain@intel.com>
X-Mailer: git-send-email 2.5.5
In-Reply-To: <1473933815-185834-1-git-send-email-deepak.k.jain@intel.com>
References: <1472131419-89617-1-git-send-email-deepak.k.jain@intel.com>
 <1473933815-185834-1-git-send-email-deepak.k.jain@intel.com>
Subject: [dpdk-dev] [PATCH v3 2/3] crypto/qat: add Kasumi support in Intel(R) QAT driver

This patch adds Kasumi support to the Intel(R) QuickAssist driver.

Signed-off-by: Deepak Kumar Jain <deepak.k.jain@intel.com>
---
 doc/guides/cryptodevs/qat.rst                    | 10 +--
 doc/guides/rel_notes/release_16_11.rst           |  2 +-
 drivers/crypto/qat/qat_adf/qat_algs.h            | 10 ++-
 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 72 +++++++++++++++++++--
 drivers/crypto/qat/qat_crypto.c                  | 79 ++++++++++++++++++++++--
 5 files changed, 158 insertions(+), 15 deletions(-)

diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 78cadc4..6cdfb93 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -51,6 +51,7 @@ Cipher algorithms:
 * ``RTE_CRYPTO_CIPHER_SNOW3G_UEA2``
 * ``RTE_CRYPTO_CIPHER_AES_GCM``
 * ``RTE_CRYPTO_CIPHER_NULL``
+* ``RTE_CRYPTO_CIPHER_KASUMI_F8``
 
 Hash algorithms:
 
@@ -63,17 +64,18 @@ Hash algorithms:
 * ``RTE_CRYPTO_AUTH_SNOW3G_UIA2``
 * ``RTE_CRYPTO_AUTH_MD5_HMAC``
 * ``RTE_CRYPTO_AUTH_NULL``
+* ``RTE_CRYPTO_AUTH_KASUMI_F9``
 
 Limitations
 -----------
 
 * Chained mbufs are not supported.
-* Hash only is not supported except Snow3G UIA2.
-* Cipher only is not supported except Snow3G UEA2.
+* Hash only is not supported except Snow3G UIA2 and Kasumi F9.
+* Cipher only is not supported except Snow3G UEA2 and Kasumi F8.
 * Only supports the session-oriented API implementation (session-less APIs are not supported).
 * Not performance tuned.
-* Snow3g(UEA2) supported only if cipher length, cipher offset fields are byte-aligned.
-* Snow3g(UIA2) supported only if hash length, hash offset fields are byte-aligned.
+* Snow3g(UEA2) and Kasumi(F8) supported only if cipher length, cipher offset fields are byte-aligned.
+* Snow3g(UIA2) and Kasumi(F9) supported only if hash length, hash offset fields are byte-aligned.
 * No BSD support as BSD QAT kernel driver not available.
 * Snow3g (UIA2) not supported in the PMD of **Intel QuickAssist Technology C3xxx** device.
 
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index 4bc67e0..1dd0e6a 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -50,7 +50,7 @@ New Features
   * Added support for SHA224-HMAC algorithm.
  * Added support for SHA384-HMAC algorithm.
  * Added support for NULL algorithm.
-
+  * Added support for KASUMI (F8 and F9) algorithm.
 
 Resolved Issues
 ---------------
 
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 6a86053..fad8471 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -51,6 +51,14 @@
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
 
+/*
+ * Key Modifier (KM) value used in Kasumi algorithm in F9 mode to XOR
+ * Integrity Key (IK)
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
 #define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
 	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
 					ICP_QAT_HW_CIPHER_NO_CONVERT, \
@@ -130,5 +138,5 @@ void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
 
 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
 #endif
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index d9437bc..131800c 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -96,6 +96,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
 		/* return maximum state1 size in this case */
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -454,7 +457,8 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 	uint32_t total_key_size;
 	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
 	uint16_t cipher_offset, cd_size;
-
+	uint32_t wordIndex = 0;
+	uint32_t *temp_key = NULL;
 	PMD_INIT_FUNC_TRACE();
 
 	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -504,6 +508,11 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
 		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
+		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
+		cipher_cd_ctrl->cipher_padding_sz =
+					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -521,9 +530,27 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
 					cdesc->qat_cipher_alg, key_convert,
 					cdesc->qat_dir);
-	memcpy(cipher->aes.key, cipherkey, cipherkeylen);
-	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
-			cipherkeylen;
+
+	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
+				sizeof(struct icp_qat_hw_cipher_config) +
+				cipherkeylen);
+		memcpy(cipher->aes.key, cipherkey, cipherkeylen);
+		memcpy(temp_key, cipherkey, cipherkeylen);
+
+		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
+		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
+								wordIndex++)
+			temp_key[wordIndex] ^=
+					KASUMI_F8_KEY_MODIFIER_4_BYTES;
+
+		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+					cipherkeylen + cipherkeylen;
+	} else {
+		memcpy(cipher->aes.key, cipherkey, cipherkeylen);
+		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+					cipherkeylen;
+	}
+
 	if (total_key_size > cipherkeylen) {
 		uint32_t padding_size = total_key_size-cipherkeylen;
@@ -559,6 +586,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 	uint16_t state1_size = 0, state2_size = 0;
 	uint16_t hash_offset, cd_size;
 	uint32_t *aad_len = NULL;
+	uint32_t wordIndex = 0;
+	uint32_t *pTempKey;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -605,7 +634,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
 				cdesc->qat_hash_alg, digestsize);
 
-	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
+	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
 		hash->auth_counter.counter = 0;
 	else
 		hash->auth_counter.counter = rte_bswap32(
@@ -722,6 +752,26 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_NULL:
 		break;
+	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+		state1_size = qat_hash_get_state1_size(
+				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
+		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
+		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size +
+							authkeylen);
+		/*
+		 * The Inner Hash Initial State2 block must contain IK
+		 * (Initialisation Key), followed by IK XOR-ed with KM
+		 * (Key Modifier): IK||(IK^KM).
+		 */
+		/* write the auth key */
+		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+		/* initialise temp key with auth key */
+		memcpy(pTempKey, authkey, authkeylen);
+		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
+		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
+			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
 		return -EFAULT;
@@ -833,3 +883,15 @@ int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	}
 	return 0;
 }
+
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+	switch (key_len) {
+	case ICP_QAT_HW_KASUMI_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index bc8d5b1..1bb1dd1 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -387,6 +387,51 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
 			}, },
 		}, }
 	},
+	{	/* KASUMI (F8) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	{	/* KASUMI (F9) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+				.block_size = 8,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 4,
+					.max = 4,
+					.increment = 0
+				},
+				.aad_size = {
+					.min = 8,
+					.max = 8,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
 	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ -512,11 +557,18 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_CIPHER_NULL:
 		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
 		break;
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
+					&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+		break;
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
 	case RTE_CRYPTO_CIPHER_3DES_CBC:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_CCM:
-	case RTE_CRYPTO_CIPHER_KASUMI_F8:
 		PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
 				cipher_xform->algo);
 		goto error_out;
@@ -645,6 +697,9 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_NULL:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
 		break;
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+		break;
 	case RTE_CRYPTO_AUTH_SHA1:
 	case RTE_CRYPTO_AUTH_SHA256:
 	case RTE_CRYPTO_AUTH_SHA512:
@@ -653,7 +708,6 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_MD5:
 	case RTE_CRYPTO_AUTH_AES_CCM:
 	case RTE_CRYPTO_AUTH_AES_GMAC:
-	case RTE_CRYPTO_AUTH_KASUMI_F9:
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
 	case RTE_CRYPTO_AUTH_ZUC_EIA3:
@@ -857,11 +911,12 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	cipher_param->cipher_length = op->sym->cipher.data.length;
 	cipher_param->cipher_offset = op->sym->cipher.data.offset;
 
-	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+	if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
 		if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
 				(cipher_param->cipher_offset
 					% BYTE_LENGTH != 0))) {
-			PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
+			PMD_DRV_LOG(ERR, " For Snow3g/Kasumi, QAT PMD only "
 				"supports byte aligned values");
 			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			return -EINVAL;
@@ -900,6 +955,22 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 		auth_param->auth_off >>= 3;
 		auth_param->auth_len >>= 3;
 	}
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+		auth_param->auth_len = (auth_param->auth_len >> 3) +
+				(auth_param->auth_off >> 3) +
+				(BYTE_LENGTH >> 3)
+				- 8;
+		auth_param->auth_off = 8;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH
+			&& ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+		auth_param->auth_len = (auth_param->auth_len >> 3) +
+				(auth_param->auth_off >> 3) +
+				(BYTE_LENGTH >> 3);
+		auth_param->auth_off = 0;
+	}
 	auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
 
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
-- 
2.5.5
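
A note on the key expansion the patch performs: for Kasumi F9 the Inner Hash Initial State2 block of the content descriptor carries IK||(IK^KM), i.e. the Integrity Key followed by the same key XOR-ed word by word with the F9 key modifier 0xAAAAAAAA, while the F8 cipher path stores the cipher key followed by the key XOR-ed with 0x55555555. The standalone sketch below reproduces the F9 expansion outside the driver so the layout is easy to inspect; kasumi_f9_expand_key and the main() harness are hypothetical illustration code, not part of this patch or of the QAT API, and only the two modifier constants are taken from the qat_algs.h additions above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Key modifier constants, as added to qat_algs.h by this patch */
#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555

/*
 * Hypothetical helper (illustration only): build the IK||(IK^KM)
 * layout that the patch writes after state1 for F9 authentication.
 */
static void
kasumi_f9_expand_key(const uint8_t ik[16], uint8_t state2[32])
{
	uint32_t km_xored[4];
	unsigned int word;

	/* first 16 bytes: the Integrity Key IK, unchanged */
	memcpy(state2, ik, sizeof(km_xored));
	/* next 16 bytes: IK XOR-ed with the F9 key modifier, 4 bytes at a time */
	memcpy(km_xored, ik, sizeof(km_xored));
	for (word = 0; word < 4; word++)
		km_xored[word] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
	memcpy(state2 + 16, km_xored, sizeof(km_xored));
}

int
main(void)
{
	const uint8_t ik[16] = { 0 };	/* all-zero key, for demonstration */
	uint8_t state2[32];
	unsigned int i;

	kasumi_f9_expand_key(ik, state2);
	for (i = 0; i < sizeof(state2); i++)
		printf("%02x%c", state2[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}

With an all-zero IK the second 16 bytes print as 0xaa, which makes it easy to check that the 4-byte XOR step produces the IK^KM half expected by the hardware path; the F8 case is analogous with the 0x55555555 modifier applied to the cipher key.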