From mboxrd@z Thu Jan  1 00:00:00 1970
From: Deepak Kumar JAIN <deepak.k.jain@intel.com>
To: dev@dpdk.org
Date: Thu, 28 Jan 2016 17:46:14 +0000
Message-Id: <1454003176-59256-2-git-send-email-deepak.k.jain@intel.com>
X-Mailer: git-send-email 2.1.0
In-Reply-To: <1454003176-59256-1-git-send-email-deepak.k.jain@intel.com>
References: <1454003176-59256-1-git-send-email-deepak.k.jain@intel.com>
Subject: [dpdk-dev] [PATCH 1/3] crypto: add cipher/auth only support

Refactor the existing session creation code into a modular form so that
cipher-only and authentication-only operations can be supported alongside
the existing chained cipher/hash case.

Signed-off-by: Deepak Kumar JAIN <deepak.k.jain@intel.com>
---
 drivers/crypto/qat/qat_adf/qat_algs.h            |  20 ++-
 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 206 ++++++++++++++++++++---
 drivers/crypto/qat/qat_crypto.c                  | 136 +++++++++++----
 drivers/crypto/qat/qat_crypto.h                  |  12 +-
 4 files changed, 308 insertions(+), 66 deletions(-)

diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 76c08c0..d4aa087 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -3,7 +3,7 @@
  * redistributing this file, you may do so under either license.
  *
  * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015-2016 Intel Corporation.
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
  * published by the Free Software Foundation.
@@ -17,7 +17,7 @@
  * qat-linux@intel.com
  *
  * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015-2016 Intel Corporation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
@@ -104,11 +104,17 @@ struct qat_alg_ablkcipher_cd {
 
 int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
 
-int qat_alg_aead_session_create_content_desc(struct qat_session *cd,
-			uint8_t *enckey, uint32_t enckeylen,
-			uint8_t *authkey, uint32_t authkeylen,
-			uint32_t add_auth_data_length,
-			uint32_t digestsize);
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
+						uint8_t *enckey,
+						uint32_t enckeylen);
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+						uint8_t *cipherkey,
+						uint32_t cipherkeylen,
+						uint8_t *authkey,
+						uint32_t authkeylen,
+						uint32_t add_auth_data_length,
+						uint32_t digestsize);
 
 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header);
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index ceaffb7..88fd803 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -3,7 +3,7 @@
  * redistributing this file, you may do so under either license.
  *
  * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015-2016 Intel Corporation.
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
  * published by the Free Software Foundation.
@@ -17,7 +17,7 @@
  * qat-linux@intel.com
  *
  * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
+ * Copyright(c) 2015-2016 Intel Corporation.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
@@ -359,15 +359,141 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
 					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }
 
-int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
-			uint8_t *cipherkey, uint32_t cipherkeylen,
-			uint8_t *authkey, uint32_t authkeylen,
-			uint32_t add_auth_data_length,
-			uint32_t digestsize)
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
+					uint8_t *cipherkey,
+					uint32_t cipherkeylen)
 {
-	struct qat_alg_cd *content_desc = &cdesc->cd;
-	struct icp_qat_hw_cipher_algo_blk *cipher = &content_desc->cipher;
-	struct icp_qat_hw_auth_algo_blk *hash = &content_desc->hash;
+	struct icp_qat_hw_cipher_algo_blk *cipher;
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+	enum icp_qat_hw_cipher_convert key_convert;
+	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
+	uint16_t cipher_offset = 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+		cipher =
+		    (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
+				sizeof(struct icp_qat_hw_auth_algo_blk));
+		cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
+	} else {
+		cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
+		cipher_offset = 0;
+	}
+	/* CD setup */
+	if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	} else {
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_CMP_AUTH_RES);
+	}
+
+	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+		/* CTR Streaming ciphers are a special case. Decrypt = encrypt
+		 * Overriding default values previously set
+		 */
+		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	else
+		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+	/* For Snow3G, set key convert and other bits */
+	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+	}
+
+	cipher->aes.cipher_config.val =
+	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+					cdesc->qat_cipher_alg, key_convert,
+					cdesc->qat_dir);
+	memcpy(cipher->aes.key, cipherkey, cipherkeylen);
+
+	proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
+		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+
+	/* Request template setup */
+	qat_alg_init_common_hdr(header);
+	header->service_cmd_id = cdesc->qat_cmd;
+
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+	/* Configure the common header protocol flags */
+	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
+	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+	cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
+
+	/* Cipher CD config setup */
+	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		cipher_cd_ctrl->cipher_key_sz =
+			(ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
+		cipher_cd_ctrl->cipher_state_sz =
+			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+	} else {
+		cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
+		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+	}
+
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+	} else {
+		PMD_DRV_LOG(ERR, "invalid param, only authenticated "
+				"encryption supported");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+					uint8_t *cipherkey,
+					uint32_t cipherkeylen,
+					uint8_t *authkey,
+					uint32_t authkeylen,
+					uint32_t add_auth_data_length,
+					uint32_t digestsize)
+{
+	struct icp_qat_hw_cipher_algo_blk *cipher;
+	struct icp_qat_hw_auth_algo_blk *hash;
 	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
 	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -379,30 +505,56 @@ int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
 		((char *)&req_tmpl->serv_specif_rqpars +
 		sizeof(struct icp_qat_fw_la_cipher_req_params));
 	enum icp_qat_hw_cipher_convert key_convert;
-	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
+	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
 	uint16_t state1_size = 0;
 	uint16_t state2_size = 0;
+	uint16_t cipher_offset = 0, hash_offset = 0;
 
 	PMD_INIT_FUNC_TRACE();
 
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+		hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
+		cipher =
+		    (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
+				sizeof(struct icp_qat_hw_auth_algo_blk));
+		hash_offset = 0;
+		cipher_offset = ((char *)hash - (char *)cipher);
+	} else {
+		cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
+		hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
+				sizeof(struct icp_qat_hw_cipher_algo_blk));
+		cipher_offset = 0;
+		hash_offset = ((char *)hash - (char *)cipher);
+	}
+
 	/* CD setup */
 	if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
 		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-					ICP_QAT_FW_LA_RET_AUTH_RES);
+				ICP_QAT_FW_LA_RET_AUTH_RES);
 		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+				ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
 	} else {
-		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
 		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
-					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
 		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
-					ICP_QAT_FW_LA_CMP_AUTH_RES);
+				ICP_QAT_FW_LA_CMP_AUTH_RES);
 	}
 
-	cipher->aes.cipher_config.val = ICP_QAT_HW_CIPHER_CONFIG_BUILD(
-		cdesc->qat_mode, cdesc->qat_cipher_alg, key_convert,
-		cdesc->qat_dir);
+	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+		/* CTR Streaming ciphers are a special case. Decrypt = encrypt
+		 * Overriding default values previously set
+		 */
+		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+	else
+		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+	cipher->aes.cipher_config.val =
+	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+					cdesc->qat_cipher_alg, key_convert,
+					cdesc->qat_dir);
 	memcpy(cipher->aes.key, cipherkey, cipherkeylen);
 
 	hash->sha.inner_setup.auth_config.reserved = 0;
@@ -454,15 +606,15 @@ int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
 	/* Configure the common header protocol flags */
 	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
 	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
-	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+	cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
 
 	/* Cipher CD config setup */
 	cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
 	cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
-	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
 
 	/* Auth CD config setup */
-	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
 	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
 	hash_cd_ctrl->inner_res_sz = digestsize;
 	hash_cd_ctrl->final_sz = digestsize;
@@ -505,8 +657,12 @@ int qat_alg_aead_session_create_content_desc(struct qat_session *cdesc,
 						>> 3);
 
 	auth_param->auth_res_sz = digestsize;
-
-	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
 		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
 					ICP_QAT_FW_SLICE_CIPHER);
 		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 47b257f..e524638 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -91,16 +91,17 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
 static int qat_get_cmd_id(const struct rte_crypto_xform *xform)
 {
-	if (xform->next == NULL)
-		return -1;
 
 	/* Cipher Only */
 	if (xform->type == RTE_CRYPTO_XFORM_CIPHER && xform->next == NULL)
-		return -1; /* return ICP_QAT_FW_LA_CMD_CIPHER; */
+		return ICP_QAT_FW_LA_CMD_CIPHER;
 
 	/* Authentication Only */
 	if (xform->type == RTE_CRYPTO_XFORM_AUTH && xform->next == NULL)
-		return -1; /* return ICP_QAT_FW_LA_CMD_AUTH; */
+		return -1;
+
+	if (xform->next == NULL)
+		return -1;
 
 	/* Cipher then Authenticate */
 	if (xform->type == RTE_CRYPTO_XFORM_CIPHER &&
@@ -140,32 +141,14 @@ qat_get_cipher_xform(struct rte_crypto_xform *xform)
 	return NULL;
 }
 
-
-
-void *
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
-		struct rte_crypto_xform *xform, void *session_private)
+struct qat_session *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+		struct rte_crypto_xform *xform,
+		struct qat_session *session_private)
 {
 	struct qat_pmd_private *internals = dev->data->dev_private;
-	struct qat_session *session = session_private;
-
-	struct rte_crypto_auth_xform *auth_xform = NULL;
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
-
-	int qat_cmd_id;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Get requested QAT command id */
-	qat_cmd_id = qat_get_cmd_id(xform);
-	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
-		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
-		goto error_out;
-	}
-	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
-
-	/* Get cipher xform from crypto xform chain */
 	cipher_xform = qat_get_cipher_xform(xform);
 
 	switch (cipher_xform->algo) {
@@ -206,8 +189,28 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 	else
 		session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
 
+	if (qat_alg_aead_session_create_content_desc_cipher(session,
+						cipher_xform->key.data,
+						cipher_xform->key.length))
+		goto error_out;
+
+	return session;
 
-	/* Get authentication xform from Crypto xform chain */
+error_out:
+	rte_mempool_put(internals->sess_mp, session);
+	return NULL;
+}
+
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+				struct rte_crypto_xform *xform,
+				struct qat_session *session_private)
+{
+
+	struct qat_pmd_private *internals = dev->data->dev_private;
+	struct qat_session *session = session_private;
+	struct rte_crypto_auth_xform *auth_xform = NULL;
+	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 	auth_xform = qat_get_auth_xform(xform);
 
 	switch (auth_xform->algo) {
@@ -251,8 +254,9 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 				auth_xform->algo);
 		goto error_out;
 	}
+	cipher_xform = qat_get_cipher_xform(xform);
 
-	if (qat_alg_aead_session_create_content_desc(session,
+	if (qat_alg_aead_session_create_content_desc_auth(session,
 		cipher_xform->key.data,
 		cipher_xform->key.length,
 		auth_xform->key.data,
@@ -261,19 +265,85 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 		auth_xform->digest_length))
 		goto error_out;
 
-	return (struct rte_cryptodev_session *)session;
+	return session;
 
 error_out:
 	rte_mempool_put(internals->sess_mp, session);
 	return NULL;
 }
 
-unsigned qat_crypto_sym_get_session_private_size(
-		struct rte_cryptodev *dev __rte_unused)
+void *
+qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+				struct rte_crypto_xform *xform,
+				void *session_private)
 {
-	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
+	struct qat_pmd_private *internals = dev->data->dev_private;
+
+	struct qat_session *session = session_private;
+
+	int qat_cmd_id;
+
+	/* Get requested QAT command id */
+	qat_cmd_id = qat_get_cmd_id(xform);
+	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+		PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
+		goto error_out;
+	}
+	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+
+	switch (session->qat_cmd) {
+	case ICP_QAT_FW_LA_CMD_CIPHER:
+		session =
+		    qat_crypto_sym_configure_session_cipher(dev, xform,
+							session);
+		break;
+	case ICP_QAT_FW_LA_CMD_AUTH:
+		session =
+		    qat_crypto_sym_configure_session_auth(dev, xform,
+							session);
+		break;
+	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+		session =
+		    qat_crypto_sym_configure_session_cipher(dev, xform,
+							session);
+		session =
+		    qat_crypto_sym_configure_session_auth(dev, xform,
+							session);
+		break;
+	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+		session =
+		    qat_crypto_sym_configure_session_auth(dev, xform,
+							session);
+		session =
+		    qat_crypto_sym_configure_session_cipher(dev, xform,
+							session);
+		break;
+	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_MGF1:
+	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+	case ICP_QAT_FW_LA_CMD_DELIMITER:
+		PMD_DRV_LOG(ERR, "Unsupported Service %u", session->qat_cmd);
+		goto error_out;
+	default:
+		PMD_DRV_LOG(ERR, "Unsupported Service %u", session->qat_cmd);
+		goto error_out;
+	}
+	return session;
+
+error_out:
+	rte_mempool_put(internals->sess_mp, session);
+	return NULL;
 }
 
+unsigned
+qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
+}
 
 uint16_t
 qat_crypto_pkt_tx_burst(void *qp, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts)
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index d680364..bd63b85 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -107,6 +107,16 @@ qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
 extern void
 qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
 
+extern struct qat_session *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+				struct rte_crypto_xform *xform,
+				struct qat_session *session_private);
+
+extern struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+				struct rte_crypto_xform *xform,
+				struct qat_session *session_private);
+
 extern void *
 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 		struct rte_crypto_xform *xform, void *session_private);
-- 
2.1.0
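
For context (not part of the patch): a minimal sketch of the xform chain shape
that qat_get_cmd_id() now accepts for cipher-only operation. The names
struct rte_crypto_xform, its type and next members, and RTE_CRYPTO_XFORM_CIPHER
are taken from the code above; the <rte_crypto.h> header name and every other
member of the struct are assumptions, so the remaining cipher fields are left
unset here.

	#include <string.h>
	#include <rte_crypto.h>		/* header name assumed */

	/* Build a cipher-only chain: one cipher xform, nothing chained after
	 * it.  With this patch, qat_get_cmd_id() maps such a chain to
	 * ICP_QAT_FW_LA_CMD_CIPHER instead of rejecting it. */
	static struct rte_crypto_xform
	cipher_only_chain(void)
	{
		struct rte_crypto_xform xform;

		memset(&xform, 0, sizeof(xform));
		xform.type = RTE_CRYPTO_XFORM_CIPHER;	/* cipher stage only */
		xform.next = NULL;	/* no auth xform => cipher-only */
		/* cipher algorithm, key and direction would be filled in here;
		 * those member names are not shown in the patch, so they are
		 * omitted from this sketch. */
		return xform;
	}

A chain whose cipher xform instead points at an auth xform via next continues
to select the existing ICP_QAT_FW_LA_CMD_CIPHER_HASH path.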