From: Kai Ji <kai.ji@intel.com>
To: dev@dpdk.org
Cc: gakhil@marvell.com, Kai Ji <kai.ji@intel.com>
Subject: [dpdk-dev] [dpdk-dev v4 03/11] crypto/qat: rework session APIs
Date: Fri,  5 Nov 2021 00:19:24 +0000
Message-ID: <20211105001932.28784-4-kai.ji@intel.com>
In-Reply-To: <20211105001932.28784-1-kai.ji@intel.com>

This patch introduces set_session methods to the QAT generation
specific device ops. It also replaces min_qat_dev_gen_id with dev_id
in the session: a session becomes invalid if the device ID recorded
at session init does not match the device processing the crypto ops.

Signed-off-by: Kai Ji <kai.ji@intel.com>
---
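Note for reviewers: the dispatch pattern applied by this patch (a
generation-indexed ops table whose set_session hook is invoked at the
end of session configuration) can be summarised by the minimal sketch
below. Type and table names follow the patch (set_session_t,
qat_crypto_gen_dev_ops, qat_sym_gen_dev_ops); the callback bodies,
the two-entry table and main() are illustrative stubs, not the actual
driver code.

/*
 * Minimal sketch of the generation-dispatched set_session pattern.
 * The callback bodies here are stubs for illustration only.
 */
#include <errno.h>

enum qat_device_gen { QAT_GEN1, QAT_GEN2, QAT_GEN_MAX };

typedef int (*set_session_t)(void *cryptodev, void *session);

struct qat_crypto_gen_dev_ops {
	set_session_t set_session;
};

/* GEN1: picks a build_request handler; mixed algos unsupported. */
static int
set_session_gen1(void *cryptodev, void *session)
{
	(void)cryptodev;
	(void)session;
	return -ENOTSUP;
}

/* GEN2: reuses GEN1, then handles what GEN1 reported unsupported. */
static int
set_session_gen2(void *cryptodev, void *session)
{
	int ret = set_session_gen1(cryptodev, session);

	if (ret != -ENOTSUP)
		return ret;
	/* ... apply GEN2 extended hash flags for mixed algos ... */
	return 0;
}

static struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[QAT_GEN_MAX] = {
	[QAT_GEN1] = { .set_session = set_session_gen1 },
	[QAT_GEN2] = { .set_session = set_session_gen2 },
};

int
main(void)
{
	enum qat_device_gen gen = QAT_GEN2;

	/* Session setup ends by invoking the generation hook, as
	 * qat_sym_session_set_parameters() now does in this patch. */
	return qat_sym_gen_dev_ops[gen].set_session(NULL, NULL);
}
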
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  87 ++++++-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 253 +++++++++++++++++++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 121 +++++++++
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |  62 +++++
 drivers/crypto/qat/qat_crypto.c              |   1 +
 drivers/crypto/qat/qat_crypto.h              |   5 +
 drivers/crypto/qat/qat_sym.c                 |   8 +-
 drivers/crypto/qat/qat_sym_session.c         | 106 +-------
 drivers/crypto/qat/qat_sym_session.h         |   2 +-
 9 files changed, 544 insertions(+), 101 deletions(-)
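
A related note on the dev_id change: this patch only records the
device ID in the session (session->dev_id = internals->dev_id) and
removes the per-op min_qat_dev_gen comparison. The sketch below shows
the kind of check the commit message implies, assuming the datapath
compares the session's recorded device against the one processing the
op; the struct, helper name and enforcement point are assumptions for
illustration, not code from this diff.

/*
 * Illustrative sketch only: the dev_id validity check implied by the
 * commit message. The helper below is an assumed enforcement point,
 * not part of the diff.
 */
#include <errno.h>
#include <stdint.h>

struct sketch_session {
	uint16_t dev_id; /* device the session was created on */
};

static int
sketch_check_session_dev(const struct sketch_session *sess,
		uint16_t op_dev_id)
{
	/* A session built on one device is invalid on another. */
	if (sess->dev_id != op_dev_id)
		return -EINVAL;
	return 0;
}

int
main(void)
{
	struct sketch_session s = { .dev_id = 0 };

	return sketch_check_session_dev(&s, 1) == -EINVAL ? 0 : 1;
}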

diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
index b4ec440e05..621ff85638 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c
@@ -166,6 +166,90 @@ qat_sym_crypto_qp_setup_gen2(struct rte_cryptodev *dev, uint16_t qp_id,
 	return 0;
 }
 
+void
+qat_sym_session_set_ext_hash_flags_gen2(struct qat_sym_session *session,
+		uint8_t hash_flag)
+{
+	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
+	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
+			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
+			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
+
+	/* Set the Use Extended Protocol Flags bit in LW 1 */
+	QAT_FIELD_SET(header->comn_req_flags,
+			QAT_COMN_EXT_FLAGS_USED,
+			QAT_COMN_EXT_FLAGS_BITPOS,
+			QAT_COMN_EXT_FLAGS_MASK);
+
+	/* Set Hash Flags in LW 28 */
+	cd_ctrl->hash_flags |= hash_flag;
+
+	/* Set proto flags in LW 1 */
+	switch (session->qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags,
+				ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	default:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_PROTO);
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
+				header->serv_specif_flags, 0);
+		break;
+	}
+}
+
+static int
+qat_sym_crypto_set_session_gen2(void *cdev, void *session)
+{
+	struct rte_cryptodev *dev = cdev;
+	struct qat_sym_session *ctx = session;
+	const struct qat_cryptodev_private *qat_private =
+			dev->data->dev_private;
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	if (ret == 0 || ret != -ENOTSUP)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP for mixed algorithms it cannot handle;
+	 * not all of them are supported by GEN2 either, so check here.
+	 */
+	if ((qat_private->internal_capabilities &
+			QAT_SYM_CAP_MIXED_CRYPTO) == 0)
+		return -ENOTSUP;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 struct rte_cryptodev_ops qat_sym_crypto_ops_gen2 = {
 
 	/* Device related operations */
@@ -204,9 +288,10 @@ RTE_INIT(qat_sym_crypto_gen2_init)
 	qat_sym_gen_dev_ops[QAT_GEN2].cryptodev_ops = &qat_sym_crypto_ops_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_capabilities =
 			qat_sym_crypto_cap_get_gen2;
+	qat_sym_gen_dev_ops[QAT_GEN2].set_session =
+			qat_sym_crypto_set_session_gen2;
 	qat_sym_gen_dev_ops[QAT_GEN2].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
-
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN2].create_security_ctx =
 			qat_sym_create_security_gen1;
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index d3336cf4a1..6baf1810ed 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -143,6 +143,256 @@ qat_sym_crypto_cap_get_gen3(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen3(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass) {
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN3 uses single pass to treat AEAD as a
+		 * cipher operation
+		 */
+
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length, req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len - ofs.ofs.cipher.head -
+				ofs.ofs.cipher.tail;
+
+		cipher_param->spc_aad_addr = aad->iova;
+		cipher_param->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static __rte_always_inline void
+enqueue_one_auth_job_gen3(struct qat_sym_session *ctx,
+	struct qat_sym_op_cookie *cookie,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *auth_iv,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl;
+	struct icp_qat_fw_la_cipher_req_params *cipher_param;
+	uint32_t ver_key_offset;
+	uint32_t auth_data_len = data_len - ofs.ofs.auth.head -
+			ofs.ofs.auth.tail;
+
+	if (!ctx->is_single_pass_gmac ||
+			(auth_data_len > QAT_AES_GMAC_SPC_MAX_SIZE)) {
+		enqueue_one_auth_job_gen1(ctx, req, digest, auth_iv, ofs,
+				data_len);
+		return;
+	}
+
+	cipher_cd_ctrl = (void *) &req->cd_ctrl;
+	cipher_param = (void *)&req->serv_specif_rqpars;
+	ver_key_offset = sizeof(struct icp_qat_hw_auth_setup) +
+			ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+			ICP_QAT_HW_GALOIS_H_SZ + ICP_QAT_HW_GALOIS_LEN_A_SZ +
+			ICP_QAT_HW_GALOIS_E_CTR0_SZ +
+			sizeof(struct icp_qat_hw_cipher_config);
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+		ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+		/* AES-GMAC */
+		qat_set_cipher_iv(cipher_param, auth_iv, ctx->auth_iv.length,
+				req);
+	}
+
+	/* Fill separate Content Descriptor for this op */
+	rte_memcpy(cookie->opt.spc_gmac.cd_cipher.key,
+			ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+				ctx->cd.cipher.key :
+				RTE_PTR_ADD(&ctx->cd, ver_key_offset),
+			ctx->auth_key_length);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.val =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
+				ICP_QAT_HW_CIPHER_AEAD_MODE,
+				ctx->qat_cipher_alg,
+				ICP_QAT_HW_CIPHER_NO_CONVERT,
+				(ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE ?
+					ICP_QAT_HW_CIPHER_ENCRYPT :
+					ICP_QAT_HW_CIPHER_DECRYPT));
+	QAT_FIELD_SET(cookie->opt.spc_gmac.cd_cipher.cipher_config.val,
+			ctx->digest_length,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS,
+			QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK);
+	cookie->opt.spc_gmac.cd_cipher.cipher_config.reserved =
+			ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(auth_data_len);
+
+	/* Update the request */
+	req->cd_pars.u.s.content_desc_addr =
+			cookie->opt.spc_gmac.cd_phys_addr;
+	req->cd_pars.u.s.content_desc_params_sz = RTE_ALIGN_CEIL(
+			sizeof(struct icp_qat_hw_cipher_config) +
+			ctx->auth_key_length, 8) >> 3;
+	req->comn_mid.src_length = data_len;
+	req->comn_mid.dst_length = 0;
+
+	cipher_param->spc_aad_addr = 0;
+	cipher_param->spc_auth_res_addr = digest->iova;
+	cipher_param->spc_aad_sz = auth_data_len;
+	cipher_param->reserved = 0;
+	cipher_param->spc_auth_res_sz = ctx->digest_length;
+
+	req->comn_hdr.service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+	cipher_cd_ctrl->cipher_cfg_offset = 0;
+	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+	ICP_QAT_FW_LA_SINGLE_PASS_PROTO_FLAG_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_SINGLE_PASS_PROTO);
+	ICP_QAT_FW_LA_PROTO_SET(
+			req->comn_hdr.serv_specif_flags,
+			ICP_QAT_FW_LA_NO_PROTO);
+}
+
+static int
+qat_sym_build_op_aead_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen3(ctx, req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_build_op_auth_gen3(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr auth_iv;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_auth(op, ctx, &in_sgl, &out_sgl,
+			NULL, &auth_iv, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_auth_job_gen3(ctx, cookie, req, &digest, &auth_iv,
+			ofs, total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(req, ctx, in_sgl.vec, in_sgl.num, NULL,
+			&auth_iv, NULL, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen3(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN3 */
+	if (ctx->is_single_pass)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen3;
+	else if (ctx->is_single_pass_gmac)
+		ctx->build_request[proc_type] = qat_sym_build_op_auth_gen3;
+
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP for mixed algorithms it cannot handle;
+	 * these are addressed by GEN3 below.
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen3_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN3].cryptodev_ops = &qat_sym_crypto_ops_gen1;
@@ -150,6 +400,8 @@ RTE_INIT(qat_sym_crypto_gen3_init)
 			qat_sym_crypto_cap_get_gen3;
 	qat_sym_gen_dev_ops[QAT_GEN3].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN3].set_session =
+			qat_sym_crypto_set_session_gen3;
 #ifdef RTE_LIB_SECURITY
 	qat_sym_gen_dev_ops[QAT_GEN3].create_security_ctx =
 			qat_sym_create_security_gen1;
@@ -161,4 +413,5 @@ RTE_INIT(qat_asym_crypto_gen3_init)
 	qat_asym_gen_dev_ops[QAT_GEN3].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN3].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN3].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 37a58c026f..fa6a7ee2fd 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -103,11 +103,131 @@ qat_sym_crypto_cap_get_gen4(struct qat_pci_device *qat_dev __rte_unused)
 	return capa_info;
 }
 
+static __rte_always_inline void
+enqueue_one_aead_job_gen4(struct qat_sym_session *ctx,
+	struct icp_qat_fw_la_bulk_req *req,
+	struct rte_crypto_va_iova_ptr *iv,
+	struct rte_crypto_va_iova_ptr *digest,
+	struct rte_crypto_va_iova_ptr *aad,
+	union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+	if (ctx->is_single_pass && ctx->is_ucs) {
+		struct icp_qat_fw_la_cipher_20_req_params *cipher_param_20 =
+			(void *)&req->serv_specif_rqpars;
+		struct icp_qat_fw_la_cipher_req_params *cipher_param =
+			(void *)&req->serv_specif_rqpars;
+
+		/* QAT GEN4 uses single pass to treat AEAD as a cipher
+		 * operation
+		 */
+		qat_set_cipher_iv(cipher_param, iv, ctx->cipher_iv.length,
+				req);
+		cipher_param->cipher_offset = ofs.ofs.cipher.head;
+		cipher_param->cipher_length = data_len -
+				ofs.ofs.cipher.head - ofs.ofs.cipher.tail;
+
+		cipher_param_20->spc_aad_addr = aad->iova;
+		cipher_param_20->spc_auth_res_addr = digest->iova;
+
+		return;
+	}
+
+	enqueue_one_aead_job_gen1(ctx, req, iv, digest, aad, ofs, data_len);
+}
+
+static int
+qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
+		uint8_t *out_msg, void *op_cookie)
+{
+	register struct icp_qat_fw_la_bulk_req *qat_req;
+	struct rte_crypto_op *op = in_op;
+	struct qat_sym_op_cookie *cookie = op_cookie;
+	struct rte_crypto_sgl in_sgl, out_sgl;
+	struct rte_crypto_vec in_vec[QAT_SYM_SGL_MAX_NUMBER],
+			out_vec[QAT_SYM_SGL_MAX_NUMBER];
+	struct rte_crypto_va_iova_ptr cipher_iv;
+	struct rte_crypto_va_iova_ptr aad;
+	struct rte_crypto_va_iova_ptr digest;
+	union rte_crypto_sym_ofs ofs;
+	int32_t total_len;
+
+	in_sgl.vec = in_vec;
+	out_sgl.vec = out_vec;
+
+	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+
+	ofs.raw = qat_sym_convert_op_to_vec_aead(op, ctx, &in_sgl, &out_sgl,
+			&cipher_iv, &aad, &digest);
+	if (unlikely(ofs.raw == UINT64_MAX)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	total_len = qat_sym_build_req_set_data(qat_req, in_op, cookie,
+			in_sgl.vec, in_sgl.num, out_sgl.vec, out_sgl.num);
+	if (unlikely(total_len < 0)) {
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+		return -EINVAL;
+	}
+
+	enqueue_one_aead_job_gen4(ctx, qat_req, &cipher_iv, &digest, &aad, ofs,
+		total_len);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	qat_sym_debug_log_dump(qat_req, ctx, in_sgl.vec, in_sgl.num, &cipher_iv,
+			NULL, &aad, &digest);
+#endif
+
+	return 0;
+}
+
+static int
+qat_sym_crypto_set_session_gen4(void *cdev, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int ret;
+
+	ret = qat_sym_crypto_set_session_gen1(cdev, session);
+	/* special single pass build request for GEN4 */
+	if (ctx->is_single_pass && ctx->is_ucs)
+		ctx->build_request[proc_type] = qat_sym_build_op_aead_gen4;
+	if (ret == 0)
+		return ret;
+
+	/* GEN1 returns -ENOTSUP for mixed algorithms it cannot handle;
+	 * these are addressed by GEN4 below.
+	 */
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx,
+			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		qat_sym_session_set_ext_hash_flags_gen2(ctx, 0);
+	}
+
+	return 0;
+}
+
 RTE_INIT(qat_sym_crypto_gen4_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN4].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_capabilities =
 			qat_sym_crypto_cap_get_gen4;
+	qat_sym_gen_dev_ops[QAT_GEN4].set_session =
+			qat_sym_crypto_set_session_gen4;
 	qat_sym_gen_dev_ops[QAT_GEN4].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
@@ -121,4 +241,5 @@ RTE_INIT(qat_asym_crypto_gen4_init)
 	qat_asym_gen_dev_ops[QAT_GEN4].cryptodev_ops = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_capabilities = NULL;
 	qat_asym_gen_dev_ops[QAT_GEN4].get_feature_flags = NULL;
+	qat_asym_gen_dev_ops[QAT_GEN4].set_session = NULL;
 }
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index 5b96e626f8..bcdf0172fe 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -6,6 +6,7 @@
 #ifdef RTE_LIB_SECURITY
 #include <rte_security_driver.h>
 #endif
+#include <cryptodev_pmd.h>
 
 #include "adf_transport_access_macros.h"
 #include "icp_qat_fw.h"
@@ -454,12 +455,73 @@ qat_sym_create_security_gen1(void *cryptodev)
 }
 
 #endif
+int
+qat_sym_crypto_set_session_gen1(void *cryptodev __rte_unused, void *session)
+{
+	struct qat_sym_session *ctx = session;
+	qat_sym_build_request_t build_request = NULL;
+	enum rte_proc_type_t proc_type = rte_eal_process_type();
+	int handle_mixed = 0;
+
+	if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+			ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
+			!ctx->is_gmac) {
+		/* AES-GCM or AES-CCM */
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+			(ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+			&& ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+			&& ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+			/* do_aead = 1; */
+			build_request = qat_sym_build_op_aead_gen1;
+		} else {
+			/* do_auth = 1; do_cipher = 1; */
+			build_request = qat_sym_build_op_chain_gen1;
+			handle_mixed = 1;
+		}
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
+		/* do_auth = 1; do_cipher = 0;*/
+		build_request = qat_sym_build_op_auth_gen1;
+	} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+		/* do_auth = 0; do_cipher = 1; */
+		build_request = qat_sym_build_op_cipher_gen1;
+	}
+
+	if (!build_request)
+		return 0;
+	ctx->build_request[proc_type] = build_request;
+
+	if (!handle_mixed)
+		return 0;
+
+	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		return -ENOTSUP;
+	} else if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
+			ctx->qat_cipher_alg !=
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+		return -ENOTSUP;
+	} else if ((ctx->aes_cmac ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
+			(ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+			ctx->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
 
 RTE_INIT(qat_sym_crypto_gen1_init)
 {
 	qat_sym_gen_dev_ops[QAT_GEN1].cryptodev_ops = &qat_sym_crypto_ops_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_capabilities =
 			qat_sym_crypto_cap_get_gen1;
+	qat_sym_gen_dev_ops[QAT_GEN1].set_session =
+			qat_sym_crypto_set_session_gen1;
 	qat_sym_gen_dev_ops[QAT_GEN1].get_feature_flags =
 			qat_sym_crypto_feature_flags_get_gen1;
 #ifdef RTE_LIB_SECURITY
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 84c26a8062..ee878e47f1 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2021 Intel Corporation
  */
 
+#include <cryptodev_pmd.h>
 #include "qat_device.h"
 #include "qat_qp.h"
 #include "qat_crypto.h"
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6eaa15b975..a32c716d28 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -48,15 +48,20 @@ typedef uint64_t (*get_feature_flags_t)(struct qat_pci_device *qat_dev);
 
 typedef void * (*create_security_ctx_t)(void *cryptodev);
 
+typedef int (*set_session_t)(void *cryptodev, void *session);
+
 struct qat_crypto_gen_dev_ops {
 	get_feature_flags_t get_feature_flags;
 	get_capabilities_info_t get_capabilities;
 	struct rte_cryptodev_ops *cryptodev_ops;
+	set_session_t set_session;
 #ifdef RTE_LIB_SECURITY
 	create_security_ctx_t create_security_ctx;
 #endif
 };
 
+extern struct qat_crypto_gen_dev_ops qat_sym_gen_dev_ops[];
+
 int
 qat_cryptodev_config(struct rte_cryptodev *dev,
 		struct rte_cryptodev_config *config);
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 93b257522b..7481607ff8 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -212,7 +212,7 @@ handle_spc_gmac(struct qat_sym_session *ctx, struct rte_crypto_op *op,
 
 int
 qat_sym_build_request(void *in_op, uint8_t *out_msg,
-		void *op_cookie, enum qat_device_gen qat_dev_gen)
+		void *op_cookie, __rte_unused enum qat_device_gen qat_dev_gen)
 {
 	int ret = 0;
 	struct qat_sym_session *ctx = NULL;
@@ -277,12 +277,6 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
 		return -EINVAL;
 	}
 
-	if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
-		QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
-		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-		return -EINVAL;
-	}
-
 	qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
 	rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
 	qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 8ca475ca8b..52837d7c9c 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -486,80 +486,6 @@ qat_sym_session_configure(struct rte_cryptodev *dev,
 	return 0;
 }
 
-static void
-qat_sym_session_set_ext_hash_flags(struct qat_sym_session *session,
-		uint8_t hash_flag)
-{
-	struct icp_qat_fw_comn_req_hdr *header = &session->fw_req.comn_hdr;
-	struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *cd_ctrl =
-			(struct icp_qat_fw_cipher_auth_cd_ctrl_hdr *)
-			session->fw_req.cd_ctrl.content_desc_ctrl_lw;
-
-	/* Set the Use Extended Protocol Flags bit in LW 1 */
-	QAT_FIELD_SET(header->comn_req_flags,
-			QAT_COMN_EXT_FLAGS_USED,
-			QAT_COMN_EXT_FLAGS_BITPOS,
-			QAT_COMN_EXT_FLAGS_MASK);
-
-	/* Set Hash Flags in LW 28 */
-	cd_ctrl->hash_flags |= hash_flag;
-
-	/* Set proto flags in LW 1 */
-	switch (session->qat_cipher_alg) {
-	case ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_SNOW_3G_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	case ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags,
-				ICP_QAT_FW_LA_ZUC_3G_PROTO);
-		break;
-	default:
-		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
-		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(
-				header->serv_specif_flags, 0);
-		break;
-	}
-}
-
-static void
-qat_sym_session_handle_mixed(const struct rte_cryptodev *dev,
-		struct qat_sym_session *session)
-{
-	const struct qat_cryptodev_private *qat_private =
-			dev->data->dev_private;
-	enum qat_device_gen min_dev_gen = (qat_private->internal_capabilities &
-			QAT_SYM_CAP_MIXED_CRYPTO) ? QAT_GEN2 : QAT_GEN3;
-
-	if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_ZUC_EIA3_BITPOS);
-	} else if (session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 &&
-			session->qat_cipher_alg !=
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session,
-			1 << ICP_QAT_FW_AUTH_HDR_FLAG_SNOW3G_UIA2_BITPOS);
-	} else if ((session->aes_cmac ||
-			session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_NULL) &&
-			(session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			session->qat_cipher_alg ==
-			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)) {
-		session->min_qat_dev_gen = min_dev_gen;
-		qat_sym_session_set_ext_hash_flags(session, 0);
-	}
-}
-
 int
 qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
@@ -569,7 +495,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
 	int ret;
 	int qat_cmd_id;
-	int handle_mixed = 0;
 
 	/* Verify the session physical address is known */
 	rte_iova_t session_paddr = rte_mempool_virt2iova(session);
@@ -584,7 +509,7 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
+	session->dev_id = internals->dev_id;
 	session->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_NONE;
 	session->is_ucs = 0;
 
@@ -625,7 +550,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
@@ -643,7 +567,6 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 					xform, session);
 			if (ret < 0)
 				return ret;
-			handle_mixed = 1;
 		}
 		break;
 	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
@@ -664,12 +587,9 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
 		return -ENOTSUP;
 	}
 	qat_sym_session_finalize(session);
-	if (handle_mixed) {
-		/* Special handling of mixed hash+cipher algorithms */
-		qat_sym_session_handle_mixed(dev, session);
-	}
 
-	return 0;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
+			(void *)session);
 }
 
 static int
@@ -678,7 +598,6 @@ qat_sym_session_handle_single_pass(struct qat_sym_session *session,
 {
 	session->is_single_pass = 1;
 	session->is_auth = 1;
-	session->min_qat_dev_gen = QAT_GEN3;
 	session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER;
 	/* Chacha-Poly is special case that use QAT CTR mode */
 	if (aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) {
@@ -1205,9 +1124,10 @@ static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
 	return 0;
 }
 
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
-			uint8_t *data_in,
-			uint8_t *data_out)
+static int
+partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+		uint8_t *data_in,
+		uint8_t *data_out)
 {
 	int digest_size;
 	uint8_t digest[qat_hash_get_digest_size(
@@ -1654,7 +1574,6 @@ int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
 		cdesc->qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -2002,7 +1921,6 @@ int qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		cd_extra_size += ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
 		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
-		cdesc->min_qat_dev_gen = QAT_GEN2;
 
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -2263,8 +2181,6 @@ qat_sec_session_set_docsis_parameters(struct rte_cryptodev *dev,
 	session->cd_paddr = session_paddr +
 			offsetof(struct qat_sym_session, cd);
 
-	session->min_qat_dev_gen = QAT_GEN1;
-
 	/* Get requested QAT command id - should be cipher */
 	qat_cmd_id = qat_get_cmd_id(xform);
 	if (qat_cmd_id != ICP_QAT_FW_LA_CMD_CIPHER) {
@@ -2289,6 +2205,9 @@ qat_security_session_create(void *dev,
 {
 	void *sess_private_data;
 	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+	struct qat_cryptodev_private *internals = cdev->data->dev_private;
+	enum qat_device_gen qat_dev_gen = internals->qat_dev->qat_dev_gen;
+	struct qat_sym_session *sym_session = NULL;
 	int ret;
 
 	if (conf->action_type != RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL ||
@@ -2312,8 +2231,11 @@ qat_security_session_create(void *dev,
 	}
 
 	set_sec_session_private_data(sess, sess_private_data);
+	sym_session = (struct qat_sym_session *)sess_private_data;
+	sym_session->dev_id = internals->dev_id;
 
-	return ret;
+	return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)cdev,
+			sess_private_data);
 }
 
 int
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 33f977a4e3..581bce3790 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -100,7 +100,7 @@ struct qat_sym_session {
 	uint16_t auth_key_length;
 	uint16_t digest_length;
 	rte_spinlock_t lock;	/* protects this struct */
-	enum qat_device_gen min_qat_dev_gen;
+	uint16_t dev_id;
 	uint8_t aes_cmac;
 	uint8_t is_single_pass;
 	uint8_t is_single_pass_gmac;
-- 
2.17.1


