From: Arek Kusztal <arkadiuszx.kusztal@intel.com>
To: dev@dpdk.org
Cc: gakhil@marvell.com, fiona.trahe@intel.com,
roy.fan.zhang@intel.com,
Arek Kusztal <arkadiuszx.kusztal@intel.com>
Subject: [dpdk-dev] [PATCH v2 07/16] crypto/qat: rework init common header function
Date: Mon, 28 Jun 2021 17:34:25 +0100
Message-ID: <20210628163434.77741-8-arkadiuszx.kusztal@intel.com>
In-Reply-To: <20210628163434.77741-1-arkadiuszx.kusztal@intel.com>
Rework the init common header function for the request descriptor so
that it is called only once, at the end of session setup, rather than
from each content descriptor setup function. Protocol, auth and slice
information is now recorded in session fields and consumed by
qat_sym_session_init_common_hdr() via qat_sym_session_finalize(); a
simplified sketch of the resulting flow follows the diffstat.
Signed-off-by: Arek Kusztal <arkadiuszx.kusztal@intel.com>
---
drivers/crypto/qat/qat_sym.c | 25 +--
drivers/crypto/qat/qat_sym_session.c | 265 ++++++++++++++-------------
drivers/crypto/qat/qat_sym_session.h | 12 ++
3 files changed, 158 insertions(+), 144 deletions(-)
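
[Note, not part of the patch: a minimal sketch of the control flow after
this rework. qat_sym_session_finalize() and the session fields are taken
from the hunks below; example_cd_set() and example_set_parameters() are
hypothetical stand-ins for the real cd-set and set-parameters paths.]

	/* Helpers now only record protocol/auth/slice state on the
	 * session; they no longer write the request header directly. */
	static int
	example_cd_set(struct qat_sym_session *s)
	{
		s->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM; /* e.g. GCM  */
		s->slice_types |= QAT_CRYPTO_SLICE_UCS;        /* e.g. gen4 */
		s->is_auth = 1;
		return 0;
	}

	/* The common header template is then built exactly once, from
	 * the recorded state, at the end of session setup. */
	static int
	example_set_parameters(struct qat_sym_session *s)
	{
		if (example_cd_set(s) < 0)
			return -EINVAL;
		qat_sym_session_finalize(s); /* calls init_common_hdr() */
		return 0;
	}
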
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 9415ec7d32..eef4a886c5 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -289,8 +289,9 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
auth_param = (void *)((uint8_t *)cipher_param +
ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
- if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+ !ctx->is_gmac) {
/* AES-GCM or AES-CCM */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
@@ -303,7 +304,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
do_auth = 1;
do_cipher = 1;
}
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
do_auth = 1;
do_cipher = 0;
} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -383,15 +384,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
auth_param->u1.aad_adr = 0;
auth_param->u2.aad_sz = 0;
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->auth_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-
- }
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
@@ -416,14 +408,7 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->cipher_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- }
+
set_cipher_iv(ctx->cipher_iv.length,
ctx->cipher_iv.offset,
cipher_param, op, qat_req);
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 5140d61a9c..fd6fe4423d 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -69,6 +69,16 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
uint32_t aad_length,
uint32_t digestsize,
unsigned int operation);
+static void
+qat_sym_session_init_common_hdr(struct qat_sym_session *session);
+
+/* Req/cd init functions */
+
+static void
+qat_sym_session_finalize(struct qat_sym_session *session)
+{
+ qat_sym_session_init_common_hdr(session);
+}
/** Frees a context previously created
* Depends on openssl libcrypto
@@ -558,6 +568,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
int ret;
int qat_cmd_id;
+ int handle_mixed = 0;
/* Verify the session physical address is known */
rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -573,6 +584,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
offsetof(struct qat_sym_session, cd);
session->min_qat_dev_gen = QAT_GEN1;
+ session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
session->is_ucs = 0;
/* Get requested QAT command id */
@@ -612,8 +624,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
xform, session);
if (ret < 0)
return ret;
- /* Special handling of mixed hash+cipher algorithms */
- qat_sym_session_handle_mixed(dev, session);
+ handle_mixed = 1;
}
break;
case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -631,8 +642,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
xform, session);
if (ret < 0)
return ret;
- /* Special handling of mixed hash+cipher algorithms */
- qat_sym_session_handle_mixed(dev, session);
+ handle_mixed = 1;
}
break;
case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -652,72 +662,41 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
session->qat_cmd);
return -ENOTSUP;
}
+ qat_sym_session_finalize(session);
+ if (handle_mixed) {
+ /* Special handling of mixed hash+cipher algorithms */
+ qat_sym_session_handle_mixed(dev, session);
+ }
return 0;
}
static int
qat_sym_session_handle_single_pass(struct qat_sym_session *session,
- struct rte_crypto_aead_xform *aead_xform)
+ const struct rte_crypto_aead_xform *aead_xform)
{
- struct icp_qat_fw_la_cipher_req_params *cipher_param =
- (void *) &session->fw_req.serv_specif_rqpars;
-
session->is_single_pass = 1;
+ session->is_auth = 1;
session->min_qat_dev_gen = QAT_GEN3;
session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
+	/* Chacha-Poly is a special case that uses QAT CTR mode */
if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
session->qat_mode = ICP_QAT_HW_CIPHER_AEAD_MODE;
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- session->fw_req.comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
} else {
- /* Chacha-Poly is special case that use QAT CTR mode */
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
}
session->cipher_iv.offset = aead_xform->iv.offset;
session->cipher_iv.length = aead_xform->iv.length;
- if (qat_sym_cd_cipher_set(session,
- aead_xform->key.data, aead_xform->key.length))
- return -EINVAL;
session->aad_len = aead_xform->aad_length;
session->digest_length = aead_xform->digest_length;
+
if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
session->auth_op = ICP_QAT_HW_AUTH_GENERATE;
- ICP_QAT_FW_LA_RET_AUTH_SET(
- session->fw_req.comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
} else {
session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
session->auth_op = ICP_QAT_HW_AUTH_VERIFY;
- ICP_QAT_FW_LA_CMP_AUTH_SET(
- session->fw_req.comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
}
- ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
- session->fw_req.comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
- ICP_QAT_FW_LA_PROTO_SET(
- session->fw_req.comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_NO_PROTO);
- session->fw_req.comn_hdr.service_cmd_id =
- ICP_QAT_FW_LA_CMD_CIPHER;
- session->cd.cipher.cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(
- ICP_QAT_HW_CIPHER_AEAD_MODE,
- session->qat_cipher_alg,
- ICP_QAT_HW_CIPHER_NO_CONVERT,
- session->qat_dir);
- QAT_FIELD_SET(session->cd.cipher.cipher_config.val,
- aead_xform->digest_length,
- QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
- QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
- session->cd.cipher.cipher_config.reserved =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
- aead_xform->aad_length);
- cipher_param->spc_aad_sz = aead_xform->aad_length;
- cipher_param->spc_auth_res_sz = aead_xform->digest_length;
return 0;
}
@@ -737,6 +716,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
session->auth_iv.offset = auth_xform->iv.offset;
session->auth_iv.length = auth_xform->iv.length;
session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
+ session->is_auth = 1;
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1:
@@ -791,6 +771,8 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
if (session->auth_iv.length == 0)
session->auth_iv.length = AES_GCM_J0_LEN;
+ else
+ session->is_iv12B = 1;
break;
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
@@ -825,6 +807,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
}
if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ session->is_gmac = 1;
if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
@@ -832,7 +815,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
* It needs to create cipher desc content first,
* then authentication
*/
-
if (qat_sym_cd_cipher_set(session,
auth_xform->key.data,
auth_xform->key.length))
@@ -866,8 +848,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
auth_xform->key.length))
return -EINVAL;
}
- /* Restore to authentication only only */
- session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
} else {
if (qat_sym_cd_auth_set(session,
key_data,
@@ -902,6 +882,8 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
session->cipher_iv.length = xform->aead.iv.length;
session->auth_mode = ICP_QAT_HW_AUTH_MODE1;
+ session->is_auth = 1;
+ session->digest_length = aead_xform->digest_length;
session->is_single_pass = 0;
switch (aead_xform->algo) {
@@ -913,15 +895,19 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- if (qat_dev_gen == QAT_GEN3 && aead_xform->iv.length ==
- QAT_AES_GCM_SPC_IV_SIZE) {
- return qat_sym_session_handle_single_pass(session,
- aead_xform);
- }
- if (session->cipher_iv.length == 0)
- session->cipher_iv.length = AES_GCM_J0_LEN;
+
if (qat_dev_gen == QAT_GEN4)
session->is_ucs = 1;
+
+ if (session->cipher_iv.length == 0) {
+ session->cipher_iv.length = AES_GCM_J0_LEN;
+ break;
+ }
+ session->is_iv12B = 1;
+ if (qat_dev_gen == QAT_GEN3) {
+ qat_sym_session_handle_single_pass(session,
+ aead_xform);
+ }
break;
case RTE_CRYPTO_AEAD_AES_CCM:
if (qat_sym_validate_aes_key(aead_xform->key.length,
@@ -939,15 +925,20 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
return -EINVAL;
session->qat_cipher_alg =
ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305;
- return qat_sym_session_handle_single_pass(session,
+ qat_sym_session_handle_single_pass(session,
aead_xform);
+ break;
default:
QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
aead_xform->algo);
return -EINVAL;
}
- if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ if (session->is_single_pass) {
+ if (qat_sym_cd_cipher_set(session,
+ aead_xform->key.data, aead_xform->key.length))
+ return -EINVAL;
+ } else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
(aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
@@ -995,7 +986,6 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
return -EINVAL;
}
- session->digest_length = aead_xform->digest_length;
return 0;
}
@@ -1467,13 +1457,17 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
}
static void
-qat_sym_session_init_common_hdr(struct qat_sym_session *session,
- struct icp_qat_fw_comn_req_hdr *header,
- enum qat_sym_proto_flag proto_flags)
+qat_sym_session_init_common_hdr(struct qat_sym_session *session)
{
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ enum qat_sym_proto_flag proto_flags = session->qat_proto_flag;
+ uint32_t slice_flags = session->slice_types;
+
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->service_cmd_id = session->qat_cmd;
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
QAT_COMN_PTR_TYPE_FLAT);
@@ -1505,40 +1499,47 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session,
break;
}
- ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_UPDATE_STATE);
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
-
- if (session->is_ucs) {
+ /* More than one of the following flags can be set at once */
+ if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_SPC)) {
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+ }
+ if (QAT_SESSION_IS_SLICE_SET(slice_flags, QAT_CRYPTO_SLICE_UCS)) {
ICP_QAT_FW_LA_SLICE_TYPE_SET(
- session->fw_req.comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
}
-}
-/*
- * Snow3G and ZUC should never use this function
- * and set its protocol flag in both cipher and auth part of content
- * descriptor building function
- */
-static enum qat_sym_proto_flag
-qat_get_crypto_proto_flag(uint16_t flags)
-{
- int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
- enum qat_sym_proto_flag qat_proto_flag =
- QAT_CRYPTO_PROTO_FLAG_NONE;
+ if (session->is_auth) {
+ if (session->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ } else if (session->auth_op == ICP_QAT_HW_AUTH_GENERATE) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ }
+ } else {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ }
- switch (proto) {
- case ICP_QAT_FW_LA_GCM_PROTO:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
- break;
- case ICP_QAT_FW_LA_CCM_PROTO:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
- break;
+ if (session->is_iv12B) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
}
- return qat_proto_flag;
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}
int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
@@ -1554,8 +1555,12 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
- enum qat_sym_proto_flag qat_proto_flag =
- QAT_CRYPTO_PROTO_FLAG_NONE;
+ struct icp_qat_fw_la_cipher_20_req_params *req_ucs =
+ (struct icp_qat_fw_la_cipher_20_req_params *)
+ &cdesc->fw_req.serv_specif_rqpars;
+ struct icp_qat_fw_la_cipher_req_params *req_cipher =
+ (struct icp_qat_fw_la_cipher_req_params *)
+ &cdesc->fw_req.serv_specif_rqpars;
uint32_t total_key_size;
uint16_t cipher_offset, cd_size;
uint32_t wordIndex = 0;
@@ -1591,9 +1596,16 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
/*
* CTR Streaming ciphers are a special case. Decrypt = encrypt
- * Overriding default values previously set
+	 * Overriding default values previously set.
+	 * Chacha20-Poly1305 is a special case: CTR mode, but
+	 * single-pass, so both directions need to be handled.
*/
cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ if (cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305 &&
+ cdesc->auth_op == ICP_QAT_HW_AUTH_VERIFY) {
+ cdesc->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ }
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
|| cdesc->qat_cipher_alg ==
@@ -1601,6 +1613,8 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ else if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
else
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
@@ -1609,7 +1623,7 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
@@ -1619,33 +1633,24 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
- qat_proto_flag =
- qat_get_crypto_proto_flag(header->serv_specif_flags);
} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
total_key_size = ICP_QAT_HW_DES_KEY_SZ;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
- qat_proto_flag =
- qat_get_crypto_proto_flag(header->serv_specif_flags);
} else if (cdesc->qat_cipher_alg ==
ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
cdesc->min_qat_dev_gen = QAT_GEN2;
} else {
total_key_size = cipherkeylen;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- qat_proto_flag =
- qat_get_crypto_proto_flag(header->serv_specif_flags);
}
cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
- header->service_cmd_id = cdesc->qat_cmd;
- qat_sym_session_init_common_hdr(cdesc, header, qat_proto_flag);
-
cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
cipher20 = (struct icp_qat_hw_cipher_algo_blk20 *)cdesc->cd_cur_ptr;
cipher->cipher_config.val =
@@ -1670,6 +1675,7 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
} else if (cdesc->is_ucs) {
const uint8_t *final_key = cipherkey;
+ cdesc->slice_types |= QAT_CRYPTO_SLICE_UCS;
total_key_size = RTE_ALIGN_CEIL(cipherkeylen,
ICP_QAT_HW_AES_128_KEY_SZ);
cipher20->cipher_config.reserved[0] = 0;
@@ -1686,6 +1692,18 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
cipherkeylen;
}
+ if (cdesc->is_single_pass) {
+ QAT_FIELD_SET(cipher->cipher_config.val,
+ cdesc->digest_length,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+ QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+		/* UCS and SPC 1.8/2.0 share the layout of the 2nd config word */
+ cdesc->cd.cipher.cipher_config.reserved =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(
+ cdesc->aad_len);
+ cdesc->slice_types |= QAT_CRYPTO_SLICE_SPC;
+ }
+
if (total_key_size > cipherkeylen) {
uint32_t padding_size = total_key_size-cipherkeylen;
if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
@@ -1704,6 +1722,20 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
cdesc->cd_cur_ptr += padding_size;
}
+ if (cdesc->is_ucs) {
+ /*
+	 * These fields occupy the same positions as the
+	 * auth slice request fields
+ */
+ req_ucs->spc_auth_res_sz = cdesc->digest_length;
+ if (!cdesc->is_gmac) {
+ req_ucs->spc_aad_sz = cdesc->aad_len;
+ req_ucs->spc_aad_offset = 0;
+ }
+ } else if (cdesc->is_single_pass) {
+ req_cipher->spc_aad_sz = cdesc->aad_len;
+ req_cipher->spc_auth_res_sz = cdesc->digest_length;
+ }
cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
@@ -1722,7 +1754,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
struct icp_qat_hw_cipher_algo_blk *cipherconfig;
struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
void *ptr = &req_tmpl->cd_ctrl;
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
@@ -1735,8 +1766,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
uint32_t *aad_len = NULL;
uint32_t wordIndex = 0;
uint32_t *pTempKey;
- enum qat_sym_proto_flag qat_proto_flag =
- QAT_CRYPTO_PROTO_FLAG_NONE;
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
@@ -1759,19 +1788,10 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
return -EFAULT;
}
- if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
+ if (operation == RTE_CRYPTO_AUTH_OP_VERIFY)
cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
- } else {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ else
cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
- }
/*
* Setup the inner hash config
@@ -1913,7 +1933,7 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
authkeylen, cdesc->cd_cur_ptr + state1_size,
@@ -1936,7 +1956,7 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
cdesc->aad_len = aad_length;
break;
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
@@ -1960,7 +1980,7 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
hash->auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
cdesc->qat_hash_alg, digestsize);
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
@@ -1988,7 +2008,7 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
@@ -2036,10 +2056,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
return -EFAULT;
}
- /* Request template setup */
- qat_sym_session_init_common_hdr(cdesc, header, qat_proto_flag);
- header->service_cmd_id = cdesc->qat_cmd;
-
/* Auth CD config setup */
hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
@@ -2248,6 +2264,7 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
ret = qat_sym_session_configure_cipher(dev, xform, session);
if (ret < 0)
return ret;
+ qat_sym_session_finalize(session);
return 0;
}
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index e003a34f7f..1568e09200 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -48,6 +48,13 @@
#define QAT_AES_CMAC_CONST_RB 0x87
+#define QAT_CRYPTO_SLICE_SPC 1
+#define QAT_CRYPTO_SLICE_UCS 2
+#define QAT_CRYPTO_SLICE_WCP 4
+
+#define QAT_SESSION_IS_SLICE_SET(flags, flag) \
+ (!!((flags) & (flag)))
+
enum qat_sym_proto_flag {
QAT_CRYPTO_PROTO_FLAG_NONE = 0,
QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -93,6 +100,11 @@ struct qat_sym_session {
uint8_t is_single_pass;
uint8_t is_single_pass_gmac;
uint8_t is_ucs;
+ uint8_t is_iv12B;
+ uint8_t is_gmac;
+ uint8_t is_auth;
+ uint32_t slice_types;
+ enum qat_sym_proto_flag qat_proto_flag;
};
int
--
2.30.2